1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
28 */
29
30 /*
31 * Copyright (c) 2000 to 2010, LSI Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms of all code within
35 * this file that is exclusively owned by LSI, with or without
36 * modification, is permitted provided that, in addition to the CDDL 1.0
37 * License requirements, the following conditions are met:
38 *
39 * Neither the name of the author nor the names of its contributors may be
40 * used to endorse or promote products derived from this software without
41 * specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
46 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
47 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
49 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
50 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
51 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
52 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 */
56
57 /*
58 * mptsas - This is a driver based on LSI Logic's MPT2.0/2.5 interface.
59 *
60 */
61
62 #if defined(lint) || defined(DEBUG)
63 #define MPTSAS_DEBUG
64 #endif
65
66 /*
67 * standard header files.
68 */
69 #include <sys/note.h>
70 #include <sys/scsi/scsi.h>
71 #include <sys/pci.h>
72 #include <sys/file.h>
73 #include <sys/policy.h>
74 #include <sys/model.h>
75 #include <sys/sysevent.h>
76 #include <sys/sysevent/eventdefs.h>
77 #include <sys/sysevent/dr.h>
78 #include <sys/sata/sata_defs.h>
79 #include <sys/scsi/generic/sas.h>
80 #include <sys/scsi/impl/scsi_sas.h>
81
82 #pragma pack(1)
83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
90 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
91 #pragma pack()
92
93 /*
94 * private header files.
95 *
96 */
97 #include <sys/scsi/impl/scsi_reset_notify.h>
98 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
99 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
100 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
101 #include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
102 #include <sys/raidioctl.h>
103
104 #include <sys/fs/dv_node.h> /* devfs_clean */
105
106 /*
107 * FMA header files
108 */
109 #include <sys/ddifm.h>
110 #include <sys/fm/protocol.h>
111 #include <sys/fm/util.h>
112 #include <sys/fm/io/ddi.h>
113
114 /*
115 * autoconfiguration data and routines.
116 */
117 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
118 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
119 static int mptsas_power(dev_info_t *dip, int component, int level);
120
121 /*
122 * cb_ops function
123 */
124 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
125 cred_t *credp, int *rval);
126 #ifdef __sparc
127 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
128 #else /* __sparc */
129 static int mptsas_quiesce(dev_info_t *devi);
130 #endif /* __sparc */
131
132 /*
133 * Resource initilaization for hardware
134 */
135 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
136 static void mptsas_disable_bus_master(mptsas_t *mpt);
137 static void mptsas_hba_fini(mptsas_t *mpt);
138 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
139 static int mptsas_hba_setup(mptsas_t *mpt);
140 static void mptsas_hba_teardown(mptsas_t *mpt);
141 static int mptsas_config_space_init(mptsas_t *mpt);
142 static void mptsas_config_space_fini(mptsas_t *mpt);
143 static void mptsas_iport_register(mptsas_t *mpt);
144 static int mptsas_smp_setup(mptsas_t *mpt);
145 static void mptsas_smp_teardown(mptsas_t *mpt);
146 static int mptsas_cache_create(mptsas_t *mpt);
147 static void mptsas_cache_destroy(mptsas_t *mpt);
148 static int mptsas_alloc_request_frames(mptsas_t *mpt);
149 static int mptsas_alloc_sense_bufs(mptsas_t *mpt);
150 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
151 static int mptsas_alloc_free_queue(mptsas_t *mpt);
152 static int mptsas_alloc_post_queue(mptsas_t *mpt);
153 static void mptsas_alloc_reply_args(mptsas_t *mpt);
154 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
155 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
156 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
157
158 /*
159 * SCSA function prototypes
160 */
161 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
162 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
163 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
164 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
165 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
166 int tgtonly);
167 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
168 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
169 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
170 int tgtlen, int flags, int (*callback)(), caddr_t arg);
171 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
172 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
173 struct scsi_pkt *pkt);
174 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
175 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
176 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
177 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
178 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
179 void (*callback)(caddr_t), caddr_t arg);
180 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
181 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
182 static int mptsas_scsi_quiesce(dev_info_t *dip);
183 static int mptsas_scsi_unquiesce(dev_info_t *dip);
184 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
185 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
186
187 /*
188 * SMP functions
189 */
190 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
191
192 /*
193 * internal function prototypes.
194 */
195 static void mptsas_list_add(mptsas_t *mpt);
196 static void mptsas_list_del(mptsas_t *mpt);
197
198 static int mptsas_quiesce_bus(mptsas_t *mpt);
199 static int mptsas_unquiesce_bus(mptsas_t *mpt);
200
201 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
202 static void mptsas_free_handshake_msg(mptsas_t *mpt);
203
204 static void mptsas_ncmds_checkdrain(void *arg);
205
206 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
207 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
208 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
209 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
210
211 static int mptsas_do_detach(dev_info_t *dev);
212 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
213 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
214 struct scsi_pkt *pkt);
215 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
216
217 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
218 static void mptsas_handle_event(void *args);
219 static int mptsas_handle_event_sync(void *args);
220 static void mptsas_handle_dr(void *args);
221 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
222 dev_info_t *pdip);
223
224 static void mptsas_restart_cmd(void *);
225
226 static void mptsas_flush_hba(mptsas_t *mpt);
227 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
228 uint8_t tasktype);
229 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
230 uchar_t reason, uint_t stat);
231
232 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
233 static void mptsas_process_intr(mptsas_t *mpt,
234 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
235 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
236 pMpi2ReplyDescriptorsUnion_t reply_desc);
237 static void mptsas_handle_address_reply(mptsas_t *mpt,
238 pMpi2ReplyDescriptorsUnion_t reply_desc);
239 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
240 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
241 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
242
243 static void mptsas_watch(void *arg);
244 static void mptsas_watchsubr(mptsas_t *mpt);
245 static void mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt);
246
247 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
248 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
249 uint8_t *data, uint32_t request_size, uint32_t reply_size,
250 uint32_t data_size, uint32_t direction, uint8_t *dataout,
251 uint32_t dataout_size, short timeout, int mode);
252 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
253
254 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
255 uint32_t unique_id);
256 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
257 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
258 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
259 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
260 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
261 uint32_t diag_type);
262 static int mptsas_diag_register(mptsas_t *mpt,
263 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
264 static int mptsas_diag_unregister(mptsas_t *mpt,
265 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
266 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
267 uint32_t *return_code);
268 static int mptsas_diag_read_buffer(mptsas_t *mpt,
269 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
270 uint32_t *return_code, int ioctl_mode);
271 static int mptsas_diag_release(mptsas_t *mpt,
272 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
273 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
274 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
275 int ioctl_mode);
276 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
277 int mode);
278
279 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
280 int cmdlen, int tgtlen, int statuslen, int kf);
281 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
282
283 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
284 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
285
286 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
287 int kmflags);
288 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
289
290 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
291 mptsas_cmd_t *cmd);
292 static void mptsas_check_task_mgt(mptsas_t *mpt,
293 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
294 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
295 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
296 int *resid);
297
298 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
299 static void mptsas_free_active_slots(mptsas_t *mpt);
300 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
301
302 static void mptsas_restart_hba(mptsas_t *mpt);
303 static void mptsas_restart_waitq(mptsas_t *mpt);
304
305 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
306 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
307 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
308
309 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
310 static void mptsas_doneq_empty(mptsas_t *mpt);
311 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
312
313 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
314 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
315 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
316 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
317
318
319 static void mptsas_start_watch_reset_delay();
320 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
321 static void mptsas_watch_reset_delay(void *arg);
322 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
323
324 /*
325 * helper functions
326 */
327 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
328
329 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
330 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
331 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
332 int lun);
333 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
334 int lun);
335 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
336 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
337
338 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
339 int *lun);
340 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
341
342 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
343 mptsas_phymask_t phymask, uint8_t phy);
344 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
345 mptsas_phymask_t phymask, uint64_t wwid);
346 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
347 mptsas_phymask_t phymask, uint64_t wwid);
348
349 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
350 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
351
352 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
353 uint16_t *handle, mptsas_target_t **pptgt);
354 static void mptsas_update_phymask(mptsas_t *mpt);
355
356 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
357 uint32_t *status, uint8_t cmd);
358 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
359 mptsas_phymask_t *phymask);
360 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
361 mptsas_phymask_t phymask);
362 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt);
363
364
365 /*
366 * Enumeration / DR functions
367 */
368 static void mptsas_config_all(dev_info_t *pdip);
369 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
370 dev_info_t **lundip);
371 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
372 dev_info_t **lundip);
373
374 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
375 static int mptsas_offline_target(dev_info_t *pdip, char *name);
376
377 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
378 dev_info_t **dip);
379
380 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
381 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
382 dev_info_t **dip, mptsas_target_t *ptgt);
383
384 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
385 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
386
387 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
388 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
389 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
390 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
391 int lun);
392
393 static void mptsas_offline_missed_luns(dev_info_t *pdip,
394 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
395 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
396 mdi_pathinfo_t *rpip, uint_t flags);
397
398 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
399 dev_info_t **smp_dip);
400 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
401 uint_t flags);
402
403 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
404 int mode, int *rval);
405 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
406 int mode, int *rval);
407 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
408 int mode, int *rval);
409 static void mptsas_record_event(void *args);
410 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
411 int mode);
412
413 mptsas_target_t *mptsas_tgt_alloc(mptsas_t *, uint16_t, uint64_t,
414 uint32_t, mptsas_phymask_t, uint8_t);
415 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
416 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
417 dev_info_t **smp_dip);
418
419 /*
420 * Power management functions
421 */
422 static int mptsas_get_pci_cap(mptsas_t *mpt);
423 static int mptsas_init_pm(mptsas_t *mpt);
424
425 /*
426 * MPT MSI tunable:
427 *
428 * By default MSI is enabled on all supported platforms.
429 */
430 boolean_t mptsas_enable_msi = B_TRUE;
431 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
432
433 /*
434 * Global switch for use of MPI2.5 FAST PATH.
435 * We don't really know what FAST PATH actually does, so if it is suspected
436 * to cause problems it can be turned off by setting this variable to B_FALSE.
437 */
438 boolean_t mptsas_use_fastpath = B_TRUE;
439
440 static int mptsas_register_intrs(mptsas_t *);
441 static void mptsas_unregister_intrs(mptsas_t *);
442 static int mptsas_add_intrs(mptsas_t *, int);
443 static void mptsas_rem_intrs(mptsas_t *);
444
445 /*
446 * FMA Prototypes
447 */
448 static void mptsas_fm_init(mptsas_t *mpt);
449 static void mptsas_fm_fini(mptsas_t *mpt);
450 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
451
452 extern pri_t minclsyspri, maxclsyspri;
453
454 /*
455 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
456 * under this device that the paths to a physical device are created when
457 * MPxIO is used.
458 */
459 extern dev_info_t *scsi_vhci_dip;
460
461 /*
462 * Tunable timeout value for Inquiry VPD page 0x83
463 * By default the value is 30 seconds.
464 */
465 int mptsas_inq83_retry_timeout = 30;
466
467 /*
468 * This is used to allocate memory for message frame storage, not for
469 * data I/O DMA. All message frames must be stored in the first 4G of
470 * physical memory.
471 */
472 ddi_dma_attr_t mptsas_dma_attrs = {
473 DMA_ATTR_V0, /* attribute layout version */
474 0x0ull, /* address low - should be 0 (longlong) */
475 0xffffffffull, /* address high - 32-bit max range */
476 0x00ffffffull, /* count max - max DMA object size */
477 4, /* allocation alignment requirements */
478 0x78, /* burstsizes - binary encoded values */
479 1, /* minxfer - gran. of DMA engine */
480 0x00ffffffull, /* maxxfer - gran. of DMA engine */
481 0xffffffffull, /* max segment size (DMA boundary) */
482 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
483 512, /* granularity - device transfer size */
484 0 /* flags, set to 0 */
485 };
486
487 /*
488 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
489 * physical addresses are supported.)
490 */
491 ddi_dma_attr_t mptsas_dma_attrs64 = {
492 DMA_ATTR_V0, /* attribute layout version */
493 0x0ull, /* address low - should be 0 (longlong) */
494 0xffffffffffffffffull, /* address high - 64-bit max */
495 0x00ffffffull, /* count max - max DMA object size */
496 4, /* allocation alignment requirements */
497 0x78, /* burstsizes - binary encoded values */
498 1, /* minxfer - gran. of DMA engine */
499 0x00ffffffull, /* maxxfer - gran. of DMA engine */
500 0xffffffffull, /* max segment size (DMA boundary) */
501 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
502 512, /* granularity - device transfer size */
503 0 /* flags, set to 0 */
504 };
505
506 ddi_device_acc_attr_t mptsas_dev_attr = {
507 DDI_DEVICE_ATTR_V1,
508 DDI_STRUCTURE_LE_ACC,
509 DDI_STRICTORDER_ACC,
510 DDI_DEFAULT_ACC
511 };
512
513 static struct cb_ops mptsas_cb_ops = {
514 scsi_hba_open, /* open */
515 scsi_hba_close, /* close */
516 nodev, /* strategy */
517 nodev, /* print */
518 nodev, /* dump */
519 nodev, /* read */
520 nodev, /* write */
521 mptsas_ioctl, /* ioctl */
522 nodev, /* devmap */
523 nodev, /* mmap */
524 nodev, /* segmap */
525 nochpoll, /* chpoll */
526 ddi_prop_op, /* cb_prop_op */
527 NULL, /* streamtab */
528 D_MP, /* cb_flag */
529 CB_REV, /* rev */
530 nodev, /* aread */
531 nodev /* awrite */
532 };
533
534 static struct dev_ops mptsas_ops = {
535 DEVO_REV, /* devo_rev, */
536 0, /* refcnt */
537 ddi_no_info, /* info */
538 nulldev, /* identify */
539 nulldev, /* probe */
540 mptsas_attach, /* attach */
541 mptsas_detach, /* detach */
542 #ifdef __sparc
543 mptsas_reset,
544 #else
545 nodev, /* reset */
546 #endif /* __sparc */
547 &mptsas_cb_ops, /* driver operations */
548 NULL, /* bus operations */
549 mptsas_power, /* power management */
550 #ifdef __sparc
551 ddi_quiesce_not_needed
552 #else
553 mptsas_quiesce /* quiesce */
554 #endif /* __sparc */
555 };
556
557
558 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
559
560 static struct modldrv modldrv = {
561 &mod_driverops, /* Type of module. This one is a driver */
562 MPTSAS_MOD_STRING, /* Name of the module. */
563 &mptsas_ops, /* driver ops */
564 };
565
566 static struct modlinkage modlinkage = {
567 MODREV_1, &modldrv, NULL
568 };
569 #define TARGET_PROP "target"
570 #define LUN_PROP "lun"
571 #define LUN64_PROP "lun64"
572 #define SAS_PROP "sas-mpt"
573 #define MDI_GUID "wwn"
574 #define NDI_GUID "guid"
575 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
576
577 /*
578 * Local static data
579 */
580 #if defined(MPTSAS_DEBUG)
581 /*
582 * Flags to indicate which debug messages are to be printed and which go to the
583 * debug log ring buffer. Default is to not print anything, and to log
584 * everything except the watchsubr() output which normally happens every second.
585 */
586 uint32_t mptsas_debugprt_flags = 0x0;
587 uint32_t mptsas_debuglog_flags = ~(1U << 30);
588 #endif /* defined(MPTSAS_DEBUG) */
589 uint32_t mptsas_debug_resets = 0;
590
591 static kmutex_t mptsas_global_mutex;
592 static void *mptsas_state; /* soft state ptr */
593 static krwlock_t mptsas_global_rwlock;
594
595 static kmutex_t mptsas_log_mutex;
596 static char mptsas_log_buf[256];
597 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
598
599 static mptsas_t *mptsas_head, *mptsas_tail;
600 static clock_t mptsas_scsi_watchdog_tick;
601 static clock_t mptsas_tick;
602 static timeout_id_t mptsas_reset_watch;
603 static timeout_id_t mptsas_timeout_id;
604 static int mptsas_timeouts_enabled = 0;
605
606 /*
607 * Default length for extended auto request sense buffers.
608 * All sense buffers need to be under the same alloc because there
609 * is only one common top 32bits (of 64bits) address register.
610 * Most requests only require 32 bytes, but some request >256.
611 * We use rmalloc()/rmfree() on this additional memory to manage the
612 * "extended" requests.
613 */
614 int mptsas_extreq_sense_bufsize = 256*64;
615
616 /*
 * We believe that all software restrictions requiring DMA attributes
 * that limit allocation to the first 4G have been removed.
619 * However, this flag remains to enable quick switchback should suspicious
620 * problems emerge.
621 * Note that scsi_alloc_consistent_buf() does still adhere to allocating
622 * 32 bit addressable memory, but we can cope if that is changed now.
623 */
624 int mptsas_use_64bit_msgaddr = 1;
625
626 /*
627 * warlock directives
628 */
629 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
630 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
631 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
632 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
633 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
634 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
635
636 /*
637 * SM - HBA statics
638 */
639 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
640
641 #ifdef MPTSAS_DEBUG
642 void debug_enter(char *);
643 #endif
644
645 /*
646 * Notes:
647 * - scsi_hba_init(9F) initializes SCSI HBA modules
648 * - must call scsi_hba_fini(9F) if modload() fails
649 */
650 int
651 _init(void)
652 {
653 int status;
654 /* CONSTCOND */
655 ASSERT(NO_COMPETING_THREADS);
656
657 NDBG0(("_init"));
658
659 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
660 MPTSAS_INITIAL_SOFT_SPACE);
661 if (status != 0) {
662 return (status);
663 }
664
665 if ((status = scsi_hba_init(&modlinkage)) != 0) {
666 ddi_soft_state_fini(&mptsas_state);
667 return (status);
668 }
669
670 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
671 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
672 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
673
674 if ((status = mod_install(&modlinkage)) != 0) {
675 mutex_destroy(&mptsas_log_mutex);
676 rw_destroy(&mptsas_global_rwlock);
677 mutex_destroy(&mptsas_global_mutex);
678 ddi_soft_state_fini(&mptsas_state);
679 scsi_hba_fini(&modlinkage);
680 }
681
682 return (status);
683 }
684
685 /*
686 * Notes:
687 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
688 */
689 int
690 _fini(void)
691 {
692 int status;
693 /* CONSTCOND */
694 ASSERT(NO_COMPETING_THREADS);
695
696 NDBG0(("_fini"));
697
698 if ((status = mod_remove(&modlinkage)) == 0) {
699 ddi_soft_state_fini(&mptsas_state);
700 scsi_hba_fini(&modlinkage);
701 mutex_destroy(&mptsas_global_mutex);
702 rw_destroy(&mptsas_global_rwlock);
703 mutex_destroy(&mptsas_log_mutex);
704 }
705 return (status);
706 }
707
708 /*
709 * The loadable-module _info(9E) entry point
710 */
711 int
712 _info(struct modinfo *modinfop)
713 {
714 /* CONSTCOND */
715 ASSERT(NO_COMPETING_THREADS);
716 NDBG0(("mptsas _info"));
717
718 return (mod_info(&modlinkage, modinfop));
719 }
720
721 static int
722 mptsas_target_eval_devhdl(const void *op, void *arg)
723 {
724 uint16_t dh = *(uint16_t *)arg;
725 const mptsas_target_t *tp = op;
726
727 return ((int)tp->m_devhdl - (int)dh);
728 }
729
730 static int
731 mptsas_target_eval_slot(const void *op, void *arg)
732 {
733 mptsas_led_control_t *lcp = arg;
734 const mptsas_target_t *tp = op;
735
736 if (tp->m_enclosure != lcp->Enclosure)
737 return ((int)tp->m_enclosure - (int)lcp->Enclosure);
738
739 return ((int)tp->m_slot_num - (int)lcp->Slot);
740 }
741
742 static int
743 mptsas_target_eval_nowwn(const void *op, void *arg)
744 {
745 uint8_t phy = *(uint8_t *)arg;
746 const mptsas_target_t *tp = op;
747
748 if (tp->m_addr.mta_wwn != 0)
749 return (-1);
750
751 return ((int)tp->m_phynum - (int)phy);
752 }
753
754 static int
755 mptsas_smp_eval_devhdl(const void *op, void *arg)
756 {
757 uint16_t dh = *(uint16_t *)arg;
758 const mptsas_smp_t *sp = op;
759
760 return ((int)sp->m_devhdl - (int)dh);
761 }
762
763 static uint64_t
764 mptsas_target_addr_hash(const void *tp)
765 {
766 const mptsas_target_addr_t *tap = tp;
767
768 return ((tap->mta_wwn & 0xffffffffffffULL) |
769 ((uint64_t)tap->mta_phymask << 48));
770 }
771
772 static int
773 mptsas_target_addr_cmp(const void *a, const void *b)
774 {
775 const mptsas_target_addr_t *aap = a;
776 const mptsas_target_addr_t *bap = b;
777
778 if (aap->mta_wwn < bap->mta_wwn)
779 return (-1);
780 if (aap->mta_wwn > bap->mta_wwn)
781 return (1);
782 return ((int)bap->mta_phymask - (int)aap->mta_phymask);
783 }
784
785 static void
786 mptsas_target_free(void *op)
787 {
788 kmem_free(op, sizeof (mptsas_target_t));
789 }
790
791 static void
792 mptsas_smp_free(void *op)
793 {
794 kmem_free(op, sizeof (mptsas_smp_t));
795 }
796
/*
 * Tear down the per-instance target and SMP-target lookup hashes.
 *
 * Every element still present is removed from its hash first, then the
 * hash structures themselves are destroyed and the pointers cleared so
 * that any stale access fails fast on a NULL dereference.
 */
static void
mptsas_destroy_hashes(mptsas_t *mpt)
{
	mptsas_target_t *tp;
	mptsas_smp_t *sp;

	/*
	 * NOTE(review): refhash_next() is invoked on an element after it
	 * has been refhash_remove()d; this relies on refhash keeping a
	 * removed node walkable during iteration — confirm against the
	 * refhash implementation (mptsas_hash.c).
	 */
	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		refhash_remove(mpt->m_targets, tp);
	}
	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
	    sp = refhash_next(mpt->m_smp_targets, sp)) {
		refhash_remove(mpt->m_smp_targets, sp);
	}
	refhash_destroy(mpt->m_targets);
	refhash_destroy(mpt->m_smp_targets);
	mpt->m_targets = NULL;
	mpt->m_smp_targets = NULL;
}
816
817 static int
818 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
819 {
820 dev_info_t *pdip;
821 mptsas_t *mpt;
822 scsi_hba_tran_t *hba_tran;
823 char *iport = NULL;
824 char phymask[MPTSAS_MAX_PHYS];
825 mptsas_phymask_t phy_mask = 0;
826 int dynamic_port = 0;
827 uint32_t page_address;
828 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
829 int rval = DDI_FAILURE;
830 int i = 0;
831 uint8_t numphys = 0;
832 uint8_t phy_id;
833 uint8_t phy_port = 0;
834 uint16_t attached_devhdl = 0;
835 uint32_t dev_info;
836 uint64_t attached_sas_wwn;
837 uint16_t dev_hdl;
838 uint16_t pdev_hdl;
839 uint16_t bay_num, enclosure, io_flags;
840 char attached_wwnstr[MPTSAS_WWN_STRLEN];
841
842 /* CONSTCOND */
843 ASSERT(NO_COMPETING_THREADS);
844
845 switch (cmd) {
846 case DDI_ATTACH:
847 break;
848
849 case DDI_RESUME:
850 /*
851 * If this a scsi-iport node, nothing to do here.
852 */
853 return (DDI_SUCCESS);
854
855 default:
856 return (DDI_FAILURE);
857 }
858
859 pdip = ddi_get_parent(dip);
860
861 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
862 NULL) {
863 cmn_err(CE_WARN, "Failed attach iport because fail to "
864 "get tran vector for the HBA node");
865 return (DDI_FAILURE);
866 }
867
868 mpt = TRAN2MPT(hba_tran);
869 ASSERT(mpt != NULL);
870 if (mpt == NULL)
871 return (DDI_FAILURE);
872
873 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
874 NULL) {
875 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
876 "get tran vector for the iport node");
877 return (DDI_FAILURE);
878 }
879
880 /*
881 * Overwrite parent's tran_hba_private to iport's tran vector
882 */
883 hba_tran->tran_hba_private = mpt;
884
885 ddi_report_dev(dip);
886
887 /*
888 * Get SAS address for initiator port according dev_handle
889 */
890 iport = ddi_get_name_addr(dip);
891 if (iport && strncmp(iport, "v0", 2) == 0) {
892 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
893 MPTSAS_VIRTUAL_PORT, 1) !=
894 DDI_PROP_SUCCESS) {
895 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
896 MPTSAS_VIRTUAL_PORT);
897 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
898 "prop update failed");
899 return (DDI_FAILURE);
900 }
901 return (DDI_SUCCESS);
902 }
903
904 mutex_enter(&mpt->m_mutex);
905 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
906 bzero(phymask, sizeof (phymask));
907 (void) sprintf(phymask,
908 "%x", mpt->m_phy_info[i].phy_mask);
909 if (strcmp(phymask, iport) == 0) {
910 break;
911 }
912 }
913
914 if (i == MPTSAS_MAX_PHYS) {
915 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
916 "seems not exist", iport);
917 mutex_exit(&mpt->m_mutex);
918 return (DDI_FAILURE);
919 }
920
921 phy_mask = mpt->m_phy_info[i].phy_mask;
922
923 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
924 dynamic_port = 1;
925 else
926 dynamic_port = 0;
927
928 /*
929 * Update PHY info for smhba
930 */
931 if (mptsas_smhba_phy_init(mpt)) {
932 mutex_exit(&mpt->m_mutex);
933 mptsas_log(mpt, CE_WARN, "mptsas phy update "
934 "failed");
935 return (DDI_FAILURE);
936 }
937
938 mutex_exit(&mpt->m_mutex);
939
940 numphys = 0;
941 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
942 if ((phy_mask >> i) & 0x01) {
943 numphys++;
944 }
945 }
946
947 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
948 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
949 mpt->un.m_base_wwid);
950
951 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
952 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
953 DDI_PROP_SUCCESS) {
954 (void) ddi_prop_remove(DDI_DEV_T_NONE,
955 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
956 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
957 "prop update failed");
958 return (DDI_FAILURE);
959 }
960 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
961 MPTSAS_NUM_PHYS, numphys) !=
962 DDI_PROP_SUCCESS) {
963 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
964 return (DDI_FAILURE);
965 }
966
967 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
968 "phymask", phy_mask) !=
969 DDI_PROP_SUCCESS) {
970 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
971 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
972 "prop update failed");
973 return (DDI_FAILURE);
974 }
975
976 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
977 "dynamic-port", dynamic_port) !=
978 DDI_PROP_SUCCESS) {
979 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
980 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
981 "prop update failed");
982 return (DDI_FAILURE);
983 }
984 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
985 MPTSAS_VIRTUAL_PORT, 0) !=
986 DDI_PROP_SUCCESS) {
987 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
988 MPTSAS_VIRTUAL_PORT);
989 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
990 "prop update failed");
991 return (DDI_FAILURE);
992 }
993 mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
994 &attached_devhdl);
995
996 mutex_enter(&mpt->m_mutex);
997 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
998 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
999 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
1000 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
1001 &pdev_hdl, &bay_num, &enclosure, &io_flags);
1002 if (rval != DDI_SUCCESS) {
1003 mptsas_log(mpt, CE_WARN,
1004 "Failed to get device page0 for handle:%d",
1005 attached_devhdl);
1006 mutex_exit(&mpt->m_mutex);
1007 return (DDI_FAILURE);
1008 }
1009
1010 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1011 bzero(phymask, sizeof (phymask));
1012 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
1013 if (strcmp(phymask, iport) == 0) {
1014 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
1015 "%x",
1016 mpt->m_phy_info[i].phy_mask);
1017 }
1018 }
1019 mutex_exit(&mpt->m_mutex);
1020
1021 bzero(attached_wwnstr, sizeof (attached_wwnstr));
1022 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
1023 attached_sas_wwn);
1024 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
1025 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
1026 DDI_PROP_SUCCESS) {
1027 (void) ddi_prop_remove(DDI_DEV_T_NONE,
1028 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
1029 return (DDI_FAILURE);
1030 }
1031
1032 /* Create kstats for each phy on this iport */
1033
1034 mptsas_create_phy_stats(mpt, iport, dip);
1035
1036 /*
1037 * register sas hba iport with mdi (MPxIO/vhci)
1038 */
1039 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
1040 dip, 0) == MDI_SUCCESS) {
1041 mpt->m_mpxio_enable = TRUE;
1042 }
1043 return (DDI_SUCCESS);
1044 }
1045
1046 /*
1047 * Notes:
1048 * Set up all device state and allocate data structures,
1049 * mutexes, condition variables, etc. for device operation.
1050 * Add interrupts needed.
1051 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1052 */
1053 static int
1054 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1055 {
1056 mptsas_t *mpt = NULL;
1057 int instance, i, j;
1058 int doneq_thread_num;
1059 char intr_added = 0;
1060 char map_setup = 0;
1061 char config_setup = 0;
1062 char hba_attach_setup = 0;
1063 char smp_attach_setup = 0;
1064 char mutex_init_done = 0;
1065 char event_taskq_create = 0;
1066 char reset_taskq_create = 0;
1067 char dr_taskq_create = 0;
1068 char doneq_thread_create = 0;
1069 char added_watchdog = 0;
1070 scsi_hba_tran_t *hba_tran;
1071 uint_t mem_bar = MEM_SPACE;
1072 int rval = DDI_FAILURE;
1073
1074 /* CONSTCOND */
1075 ASSERT(NO_COMPETING_THREADS);
1076
1077 if (scsi_hba_iport_unit_address(dip)) {
1078 return (mptsas_iport_attach(dip, cmd));
1079 }
1080
1081 switch (cmd) {
1082 case DDI_ATTACH:
1083 break;
1084
1085 case DDI_RESUME:
1086 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
1087 return (DDI_FAILURE);
1088
1089 mpt = TRAN2MPT(hba_tran);
1090
1091 if (!mpt) {
1092 return (DDI_FAILURE);
1093 }
1094
1095 /*
1096 * Reset hardware and softc to "no outstanding commands"
1097 * Note that a check condition can result on first command
1098 * to a target.
1099 */
1100 mutex_enter(&mpt->m_mutex);
1101
1102 /*
1103 * raise power.
1104 */
1105 if (mpt->m_options & MPTSAS_OPT_PM) {
1106 mutex_exit(&mpt->m_mutex);
1107 (void) pm_busy_component(dip, 0);
1108 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
1109 if (rval == DDI_SUCCESS) {
1110 mutex_enter(&mpt->m_mutex);
1111 } else {
1112 /*
1113 * The pm_raise_power() call above failed,
1114 * and that can only occur if we were unable
1115 * to reset the hardware. This is probably
1116 * due to unhealty hardware, and because
1117 * important filesystems(such as the root
1118 * filesystem) could be on the attached disks,
1119 * it would not be a good idea to continue,
1120 * as we won't be entirely certain we are
1121 * writing correct data. So we panic() here
1122 * to not only prevent possible data corruption,
1123 * but to give developers or end users a hope
1124 * of identifying and correcting any problems.
1125 */
1126 fm_panic("mptsas could not reset hardware "
1127 "during resume");
1128 }
1129 }
1130
1131 mpt->m_suspended = 0;
1132
1133 /*
1134 * Reinitialize ioc
1135 */
1136 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1137 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1138 mutex_exit(&mpt->m_mutex);
1139 if (mpt->m_options & MPTSAS_OPT_PM) {
1140 (void) pm_idle_component(dip, 0);
1141 }
1142 fm_panic("mptsas init chip fail during resume");
1143 }
1144 /*
1145 * mptsas_update_driver_data needs interrupts so enable them
1146 * first.
1147 */
1148 MPTSAS_ENABLE_INTR(mpt);
1149 mptsas_update_driver_data(mpt);
1150
1151 /* start requests, if possible */
1152 mptsas_restart_hba(mpt);
1153
1154 mutex_exit(&mpt->m_mutex);
1155
1156 /*
1157 * Restart watch thread
1158 */
1159 mutex_enter(&mptsas_global_mutex);
1160 if (mptsas_timeout_id == 0) {
1161 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1162 mptsas_tick);
1163 mptsas_timeouts_enabled = 1;
1164 }
1165 mutex_exit(&mptsas_global_mutex);
1166
1167 /* report idle status to pm framework */
1168 if (mpt->m_options & MPTSAS_OPT_PM) {
1169 (void) pm_idle_component(dip, 0);
1170 }
1171
1172 return (DDI_SUCCESS);
1173
1174 default:
1175 return (DDI_FAILURE);
1176
1177 }
1178
1179 instance = ddi_get_instance(dip);
1180
1181 /*
1182 * Allocate softc information.
1183 */
1184 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1185 mptsas_log(NULL, CE_WARN,
1186 "mptsas%d: cannot allocate soft state", instance);
1187 goto fail;
1188 }
1189
1190 mpt = ddi_get_soft_state(mptsas_state, instance);
1191
1192 if (mpt == NULL) {
1193 mptsas_log(NULL, CE_WARN,
1194 "mptsas%d: cannot get soft state", instance);
1195 goto fail;
1196 }
1197
1198 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1199 scsi_size_clean(dip);
1200
1201 mpt->m_dip = dip;
1202 mpt->m_instance = instance;
1203
1204 /* Make a per-instance copy of the structures */
1205 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1206 if (mptsas_use_64bit_msgaddr) {
1207 mpt->m_msg_dma_attr = mptsas_dma_attrs64;
1208 } else {
1209 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1210 }
1211 mpt->m_reg_acc_attr = mptsas_dev_attr;
1212 mpt->m_dev_acc_attr = mptsas_dev_attr;
1213
1214 /*
1215 * Size of individual request sense buffer
1216 */
1217 mpt->m_req_sense_size = EXTCMDS_STATUS_SIZE;
1218
1219 /*
1220 * Initialize FMA
1221 */
1222 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1223 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1224 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1225 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1226
1227 mptsas_fm_init(mpt);
1228
1229 if (mptsas_alloc_handshake_msg(mpt,
1230 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1231 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1232 goto fail;
1233 }
1234
1235 /*
1236 * Setup configuration space
1237 */
1238 if (mptsas_config_space_init(mpt) == FALSE) {
1239 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1240 goto fail;
1241 }
1242 config_setup++;
1243
1244 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1245 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1246 mptsas_log(mpt, CE_WARN, "map setup failed");
1247 goto fail;
1248 }
1249 map_setup++;
1250
1251 /*
1252 * A taskq is created for dealing with the event handler
1253 */
1254 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1255 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1256 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1257 goto fail;
1258 }
1259 event_taskq_create++;
1260
1261 /*
1262 * A taskq is created for dealing with dr events
1263 */
1264 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1265 "mptsas_dr_taskq",
1266 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1267 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1268 "failed");
1269 goto fail;
1270 }
1271 dr_taskq_create++;
1272
1273 /*
1274 * A taskq is created for dealing with reset events
1275 */
1276 if ((mpt->m_reset_taskq = ddi_taskq_create(dip,
1277 "mptsas_reset_taskq",
1278 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1279 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for reset "
1280 "failed");
1281 goto fail;
1282 }
1283 reset_taskq_create++;
1284
1285 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1286 0, "mptsas_doneq_thread_threshold_prop", 10);
1287 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1288 0, "mptsas_doneq_length_threshold_prop", 8);
1289 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1290 0, "mptsas_doneq_thread_n_prop", 8);
1291
1292 if (mpt->m_doneq_thread_n) {
1293 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1294 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1295
1296 mutex_enter(&mpt->m_doneq_mutex);
1297 mpt->m_doneq_thread_id =
1298 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1299 * mpt->m_doneq_thread_n, KM_SLEEP);
1300
1301 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1302 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1303 CV_DRIVER, NULL);
1304 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1305 MUTEX_DRIVER, NULL);
1306 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1307 mpt->m_doneq_thread_id[j].flag |=
1308 MPTSAS_DONEQ_THREAD_ACTIVE;
1309 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1310 mpt->m_doneq_thread_id[j].arg.t = j;
1311 mpt->m_doneq_thread_id[j].threadp =
1312 thread_create(NULL, 0, mptsas_doneq_thread,
1313 &mpt->m_doneq_thread_id[j].arg,
1314 0, &p0, TS_RUN, minclsyspri);
1315 mpt->m_doneq_thread_id[j].donetail =
1316 &mpt->m_doneq_thread_id[j].doneq;
1317 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1318 }
1319 mutex_exit(&mpt->m_doneq_mutex);
1320 doneq_thread_create++;
1321 }
1322
1323 /*
1324 * Disable hardware interrupt since we're not ready to
1325 * handle it yet.
1326 */
1327 MPTSAS_DISABLE_INTR(mpt);
1328 if (mptsas_register_intrs(mpt) == FALSE)
1329 goto fail;
1330 intr_added++;
1331
1332 /* Initialize mutex used in interrupt handler */
1333 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1334 DDI_INTR_PRI(mpt->m_intr_pri));
1335 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1336 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1337 DDI_INTR_PRI(mpt->m_intr_pri));
1338 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1339 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1340 NULL, MUTEX_DRIVER,
1341 DDI_INTR_PRI(mpt->m_intr_pri));
1342 }
1343
1344 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1345 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1346 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1347 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1348 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1349 mutex_init_done++;
1350
1351 mutex_enter(&mpt->m_mutex);
1352 /*
1353 * Initialize power management component
1354 */
1355 if (mpt->m_options & MPTSAS_OPT_PM) {
1356 if (mptsas_init_pm(mpt)) {
1357 mutex_exit(&mpt->m_mutex);
1358 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1359 "failed");
1360 goto fail;
1361 }
1362 }
1363
1364 /*
1365 * Initialize chip using Message Unit Reset, if allowed
1366 */
1367 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1368 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1369 mutex_exit(&mpt->m_mutex);
1370 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1371 goto fail;
1372 }
1373
1374 mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
1375 mptsas_target_addr_hash, mptsas_target_addr_cmp,
1376 mptsas_target_free, sizeof (mptsas_target_t),
1377 offsetof(mptsas_target_t, m_link),
1378 offsetof(mptsas_target_t, m_addr), KM_SLEEP);
1379
1380 /*
1381 * Fill in the phy_info structure and get the base WWID
1382 */
1383 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1384 mptsas_log(mpt, CE_WARN,
1385 "mptsas_get_manufacture_page5 failed!");
1386 goto fail;
1387 }
1388
1389 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1390 mptsas_log(mpt, CE_WARN,
1391 "mptsas_get_sas_io_unit_page_hndshk failed!");
1392 goto fail;
1393 }
1394
1395 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1396 mptsas_log(mpt, CE_WARN,
1397 "mptsas_get_manufacture_page0 failed!");
1398 goto fail;
1399 }
1400
1401 mutex_exit(&mpt->m_mutex);
1402
1403 /*
1404 * Register the iport for multiple port HBA
1405 */
1406 mptsas_iport_register(mpt);
1407
1408 /*
1409 * initialize SCSI HBA transport structure
1410 */
1411 if (mptsas_hba_setup(mpt) == FALSE)
1412 goto fail;
1413 hba_attach_setup++;
1414
1415 if (mptsas_smp_setup(mpt) == FALSE)
1416 goto fail;
1417 smp_attach_setup++;
1418
1419 if (mptsas_cache_create(mpt) == FALSE)
1420 goto fail;
1421
1422 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1423 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1424 if (mpt->m_scsi_reset_delay == 0) {
1425 mptsas_log(mpt, CE_NOTE,
1426 "scsi_reset_delay of 0 is not recommended,"
1427 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1428 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1429 }
1430
1431 /*
1432 * Initialize the wait and done FIFO queue
1433 */
1434 mpt->m_donetail = &mpt->m_doneq;
1435 mpt->m_waitqtail = &mpt->m_waitq;
1436 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1437 mpt->m_tx_draining = 0;
1438
1439 /*
1440 * ioc cmd queue initialize
1441 */
1442 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1443 mpt->m_dev_handle = 0xFFFF;
1444
1445 MPTSAS_ENABLE_INTR(mpt);
1446
1447 /*
1448 * enable event notification
1449 */
1450 mutex_enter(&mpt->m_mutex);
1451 if (mptsas_ioc_enable_event_notification(mpt)) {
1452 mutex_exit(&mpt->m_mutex);
1453 goto fail;
1454 }
1455 mutex_exit(&mpt->m_mutex);
1456
1457 /*
1458 * used for mptsas_watch
1459 */
1460 mptsas_list_add(mpt);
1461
1462 mutex_enter(&mptsas_global_mutex);
1463 if (mptsas_timeouts_enabled == 0) {
1464 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1465 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1466
1467 mptsas_tick = mptsas_scsi_watchdog_tick *
1468 drv_usectohz((clock_t)1000000);
1469
1470 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1471 mptsas_timeouts_enabled = 1;
1472 }
1473 mutex_exit(&mptsas_global_mutex);
1474 added_watchdog++;
1475
1476 /*
1477 * Initialize PHY info for smhba.
1478 * This requires watchdog to be enabled otherwise if interrupts
1479 * don't work the system will hang.
1480 */
1481 if (mptsas_smhba_setup(mpt)) {
1482 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1483 "failed");
1484 goto fail;
1485 }
1486
1487 /* Check all dma handles allocated in attach */
1488 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1489 != DDI_SUCCESS) ||
1490 (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl)
1491 != DDI_SUCCESS) ||
1492 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1493 != DDI_SUCCESS) ||
1494 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1495 != DDI_SUCCESS) ||
1496 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1497 != DDI_SUCCESS) ||
1498 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1499 != DDI_SUCCESS)) {
1500 goto fail;
1501 }
1502
1503 /* Check all acc handles allocated in attach */
1504 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1505 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1506 != DDI_SUCCESS) ||
1507 (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl)
1508 != DDI_SUCCESS) ||
1509 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1510 != DDI_SUCCESS) ||
1511 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1512 != DDI_SUCCESS) ||
1513 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1514 != DDI_SUCCESS) ||
1515 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1516 != DDI_SUCCESS) ||
1517 (mptsas_check_acc_handle(mpt->m_config_handle)
1518 != DDI_SUCCESS)) {
1519 goto fail;
1520 }
1521
1522 /*
1523 * After this point, we are not going to fail the attach.
1524 */
1525
1526 /* Print message of HBA present */
1527 ddi_report_dev(dip);
1528
1529 /* report idle status to pm framework */
1530 if (mpt->m_options & MPTSAS_OPT_PM) {
1531 (void) pm_idle_component(dip, 0);
1532 }
1533
1534 return (DDI_SUCCESS);
1535
1536 fail:
1537 mptsas_log(mpt, CE_WARN, "attach failed");
1538 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1539 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1540 if (mpt) {
1541 /* deallocate in reverse order */
1542 if (added_watchdog) {
1543 mptsas_list_del(mpt);
1544 mutex_enter(&mptsas_global_mutex);
1545
1546 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1547 timeout_id_t tid = mptsas_timeout_id;
1548 mptsas_timeouts_enabled = 0;
1549 mptsas_timeout_id = 0;
1550 mutex_exit(&mptsas_global_mutex);
1551 (void) untimeout(tid);
1552 mutex_enter(&mptsas_global_mutex);
1553 }
1554 mutex_exit(&mptsas_global_mutex);
1555 }
1556
1557 mptsas_cache_destroy(mpt);
1558
1559 if (smp_attach_setup) {
1560 mptsas_smp_teardown(mpt);
1561 }
1562 if (hba_attach_setup) {
1563 mptsas_hba_teardown(mpt);
1564 }
1565
1566 if (mpt->m_targets)
1567 refhash_destroy(mpt->m_targets);
1568 if (mpt->m_smp_targets)
1569 refhash_destroy(mpt->m_smp_targets);
1570
1571 if (mpt->m_active) {
1572 mptsas_free_active_slots(mpt);
1573 }
1574 if (intr_added) {
1575 mptsas_unregister_intrs(mpt);
1576 }
1577
1578 if (doneq_thread_create) {
1579 mutex_enter(&mpt->m_doneq_mutex);
1580 doneq_thread_num = mpt->m_doneq_thread_n;
1581 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1582 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1583 mpt->m_doneq_thread_id[j].flag &=
1584 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1585 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1586 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1587 }
1588 while (mpt->m_doneq_thread_n) {
1589 cv_wait(&mpt->m_doneq_thread_cv,
1590 &mpt->m_doneq_mutex);
1591 }
1592 for (j = 0; j < doneq_thread_num; j++) {
1593 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1594 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1595 }
1596 kmem_free(mpt->m_doneq_thread_id,
1597 sizeof (mptsas_doneq_thread_list_t)
1598 * doneq_thread_num);
1599 mutex_exit(&mpt->m_doneq_mutex);
1600 cv_destroy(&mpt->m_doneq_thread_cv);
1601 mutex_destroy(&mpt->m_doneq_mutex);
1602 }
1603 if (event_taskq_create) {
1604 ddi_taskq_destroy(mpt->m_event_taskq);
1605 }
1606 if (dr_taskq_create) {
1607 ddi_taskq_destroy(mpt->m_dr_taskq);
1608 }
1609 if (reset_taskq_create) {
1610 ddi_taskq_destroy(mpt->m_reset_taskq);
1611 }
1612 if (mutex_init_done) {
1613 mutex_destroy(&mpt->m_tx_waitq_mutex);
1614 mutex_destroy(&mpt->m_passthru_mutex);
1615 mutex_destroy(&mpt->m_mutex);
1616 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1617 mutex_destroy(
1618 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1619 }
1620 cv_destroy(&mpt->m_cv);
1621 cv_destroy(&mpt->m_passthru_cv);
1622 cv_destroy(&mpt->m_fw_cv);
1623 cv_destroy(&mpt->m_config_cv);
1624 cv_destroy(&mpt->m_fw_diag_cv);
1625 }
1626
1627 if (map_setup) {
1628 mptsas_cfg_fini(mpt);
1629 }
1630 if (config_setup) {
1631 mptsas_config_space_fini(mpt);
1632 }
1633 mptsas_free_handshake_msg(mpt);
1634 mptsas_hba_fini(mpt);
1635
1636 mptsas_fm_fini(mpt);
1637 ddi_soft_state_free(mptsas_state, instance);
1638 ddi_prop_remove_all(dip);
1639 }
1640 return (DDI_FAILURE);
1641 }
1642
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/*
	 * iport nodes carry no hardware state of their own; suspend work
	 * is done once for the owning HBA node.
	 */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* Count nested suspends; only the first transition does the work. */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 *
	 * m_mutex is dropped around each untimeout() because the callback
	 * may itself block on m_mutex; the id is cleared first so a
	 * concurrently-firing callback can detect the cancellation.
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 * (g == NULL after the scan means no instance is still active).
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		/*
		 * Same drop-lock-around-untimeout() dance as above, this
		 * time for the driver-global watchdog and reset-watch.
		 */
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_reset_taskq);
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1750
1751 #ifdef __sparc
1752 /*ARGSUSED*/
1753 static int
1754 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1755 {
1756 mptsas_t *mpt;
1757 scsi_hba_tran_t *tran;
1758
1759 /*
1760 * If this call is for iport, just return.
1761 */
1762 if (scsi_hba_iport_unit_address(devi))
1763 return (DDI_SUCCESS);
1764
1765 if ((tran = ddi_get_driver_private(devi)) == NULL)
1766 return (DDI_SUCCESS);
1767
1768 if ((mpt = TRAN2MPT(tran)) == NULL)
1769 return (DDI_SUCCESS);
1770
1771 /*
1772 * Send RAID action system shutdown to sync IR. Disable HBA
1773 * interrupts in hardware first.
1774 */
1775 MPTSAS_DISABLE_INTR(mpt);
1776 mptsas_raid_action_system_shutdown(mpt);
1777
1778 return (DDI_SUCCESS);
1779 }
1780 #else /* __sparc */
1781 /*
1782 * quiesce(9E) entry point.
1783 *
1784 * This function is called when the system is single-threaded at high
1785 * PIL with preemption disabled. Therefore, this function must not be
1786 * blocked.
1787 *
1788 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1789 * DDI_FAILURE indicates an error condition and should almost never happen.
1790 */
1791 static int
1792 mptsas_quiesce(dev_info_t *devi)
1793 {
1794 mptsas_t *mpt;
1795 scsi_hba_tran_t *tran;
1796
1797 /*
1798 * If this call is for iport, just return.
1799 */
1800 if (scsi_hba_iport_unit_address(devi))
1801 return (DDI_SUCCESS);
1802
1803 if ((tran = ddi_get_driver_private(devi)) == NULL)
1804 return (DDI_SUCCESS);
1805
1806 if ((mpt = TRAN2MPT(tran)) == NULL)
1807 return (DDI_SUCCESS);
1808
1809 /* Disable HBA interrupts in hardware */
1810 MPTSAS_DISABLE_INTR(mpt);
1811 /* Send RAID action system shutdonw to sync IR */
1812 mptsas_raid_action_system_shutdown(mpt);
1813
1814 return (DDI_SUCCESS);
1815 }
1816 #endif /* __sparc */
1817
1818 /*
1819 * detach(9E). Remove all device allocations and system resources;
1820 * disable device interrupts.
1821 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1822 */
1823 static int
1824 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1825 {
1826 /* CONSTCOND */
1827 ASSERT(NO_COMPETING_THREADS);
1828 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1829
1830 switch (cmd) {
1831 case DDI_DETACH:
1832 return (mptsas_do_detach(devi));
1833
1834 case DDI_SUSPEND:
1835 return (mptsas_suspend(devi));
1836
1837 default:
1838 return (DDI_FAILURE);
1839 }
1840 /* NOTREACHED */
1841 }
1842
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/*
			 * Free every client path; if any path refuses to
			 * go away the iport cannot be detached.
			 */
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR. After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_reset_taskq);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/*
	 * Shut down the completion worker threads: clear each thread's
	 * ACTIVE flag, wake it, then wait until every thread has exited
	 * (they decrement m_doneq_thread_n on the way out and signal
	 * m_doneq_thread_cv).
	 */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 *
	 * m_mutex is dropped around each untimeout() because the callback
	 * may itself block on m_mutex; the id is cleared first so a
	 * concurrently-firing callback can detect the cancellation.
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	mptsas_destroy_hashes(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
2059
2060 static void
2061 mptsas_list_add(mptsas_t *mpt)
2062 {
2063 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2064
2065 if (mptsas_head == NULL) {
2066 mptsas_head = mpt;
2067 } else {
2068 mptsas_tail->m_next = mpt;
2069 }
2070 mptsas_tail = mpt;
2071 rw_exit(&mptsas_global_rwlock);
2072 }
2073
static void
mptsas_list_del(mptsas_t *mpt)
{
	mptsas_t *m;
	/*
	 * Remove device instance from the global linked list
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	if (mptsas_head == mpt) {
		/* Unlinking the head: 'm' becomes the new head (may be NULL) */
		m = mptsas_head = mpt->m_next;
	} else {
		/* Walk the list looking for the predecessor of 'mpt' */
		for (m = mptsas_head; m != NULL; m = m->m_next) {
			if (m->m_next == mpt) {
				m->m_next = mpt->m_next;
				break;
			}
		}
		/* Instance not on the list: fatal driver-state corruption */
		if (m == NULL) {
			mptsas_log(mpt, CE_PANIC, "Not in softc list!");
		}
	}

	/*
	 * If the tail is being removed, 'm' is either the predecessor
	 * (middle-walk path) or NULL (single-element list) -- both are
	 * the correct new tail.
	 */
	if (mptsas_tail == mpt) {
		mptsas_tail = m;
	}
	rw_exit(&mptsas_global_rwlock);
}
2101
2102 static int
2103 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2104 {
2105 ddi_dma_attr_t task_dma_attrs;
2106
2107 mpt->m_hshk_dma_size = 0;
2108 task_dma_attrs = mpt->m_msg_dma_attr;
2109 task_dma_attrs.dma_attr_sgllen = 1;
2110 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2111
2112 /* allocate Task Management ddi_dma resources */
2113 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
2114 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
2115 alloc_size, NULL) == FALSE) {
2116 return (DDI_FAILURE);
2117 }
2118 mpt->m_hshk_dma_size = alloc_size;
2119
2120 return (DDI_SUCCESS);
2121 }
2122
2123 static void
2124 mptsas_free_handshake_msg(mptsas_t *mpt)
2125 {
2126 if (mpt->m_hshk_dma_size == 0)
2127 return;
2128 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
2129 mpt->m_hshk_dma_size = 0;
2130 }
2131
static int
mptsas_hba_setup(mptsas_t *mpt)
{
	scsi_hba_tran_t *hba_tran;
	int tran_flags;

	/* Allocate a transport structure */
	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
	    SCSI_HBA_CANSLEEP);
	/* With SCSI_HBA_CANSLEEP the allocation should not fail */
	ASSERT(mpt->m_tran != NULL);

	hba_tran->tran_hba_private = mpt;
	hba_tran->tran_tgt_private = NULL;

	/* Target init/free entry points */
	hba_tran->tran_tgt_init = mptsas_scsi_tgt_init;
	hba_tran->tran_tgt_free = mptsas_scsi_tgt_free;

	/* Command submission and control entry points */
	hba_tran->tran_start = mptsas_scsi_start;
	hba_tran->tran_reset = mptsas_scsi_reset;
	hba_tran->tran_abort = mptsas_scsi_abort;
	hba_tran->tran_getcap = mptsas_scsi_getcap;
	hba_tran->tran_setcap = mptsas_scsi_setcap;
	hba_tran->tran_init_pkt = mptsas_scsi_init_pkt;
	hba_tran->tran_destroy_pkt = mptsas_scsi_destroy_pkt;

	hba_tran->tran_dmafree = mptsas_scsi_dmafree;
	hba_tran->tran_sync_pkt = mptsas_scsi_sync_pkt;
	hba_tran->tran_reset_notify = mptsas_scsi_reset_notify;

	/* Dynamic device naming support */
	hba_tran->tran_get_bus_addr = mptsas_get_bus_addr;
	hba_tran->tran_get_name = mptsas_get_name;

	hba_tran->tran_quiesce = mptsas_scsi_quiesce;
	hba_tran->tran_unquiesce = mptsas_scsi_unquiesce;
	hba_tran->tran_bus_reset = NULL;

	/* Event callbacks are not supported by this driver */
	hba_tran->tran_add_eventcall = NULL;
	hba_tran->tran_get_eventcookie = NULL;
	hba_tran->tran_post_event = NULL;
	hba_tran->tran_remove_eventcall = NULL;

	hba_tran->tran_bus_config = mptsas_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	/*
	 * All children of the HBA are iports; each iport needs its own
	 * clone of the tran vector, so pass SCSI_HBA_TRAN_CLONE to SCSA
	 * along with SCSI_HBA_HBA.  The clone flag is inherited by each
	 * iport's tran vector.
	 */
	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);

	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
	    hba_tran, tran_flags) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
		/* Undo the allocation so teardown has nothing to free */
		scsi_hba_tran_free(hba_tran);
		mpt->m_tran = NULL;
		return (FALSE);
	}
	return (TRUE);
}
2193
2194 static void
2195 mptsas_hba_teardown(mptsas_t *mpt)
2196 {
2197 (void) scsi_hba_detach(mpt->m_dip);
2198 if (mpt->m_tran != NULL) {
2199 scsi_hba_tran_free(mpt->m_tran);
2200 mpt->m_tran = NULL;
2201 }
2202 }
2203
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int i, j;
	mptsas_phymask_t mask = 0x0;
	/*
	 * 'mask' accumulates phys already covered by a registered iport
	 * so each port is registered only once; initial value of mask is 0.
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* Skip phys with no attached device */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* Already part of a registered port's mask */
		if ((mask & (1 << i)) != 0)
			continue;

		/*
		 * Collect all attached phys belonging to the same
		 * controller port as phy 'i' (a wide port spans
		 * multiple phys).
		 */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		/* Record the port's phymask on every member phy */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		/* The iport's unit address is the phymask in hex */
		(void) sprintf(phy_mask_name, "%x", phy_mask);

		/*
		 * NOTE(review): m_mutex is dropped across the framework
		 * call, presumably because it may block -- confirm.
		 */
		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2258
static int
mptsas_smp_setup(mptsas_t *mpt)
{
	/* Allocate and initialize the SMP transport vector */
	mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
	ASSERT(mpt->m_smptran != NULL);
	mpt->m_smptran->smp_tran_hba_private = mpt;
	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
	if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
		/* Undo the allocation so teardown has nothing to free */
		smp_hba_tran_free(mpt->m_smptran);
		mpt->m_smptran = NULL;
		return (FALSE);
	}
	/*
	 * Initialize smp hash table, keyed by target address and freed
	 * through mptsas_smp_free.
	 */
	mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_smp_free, sizeof (mptsas_smp_t),
	    offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
	    KM_SLEEP);
	/* 0xFFFF marks the SMP device handle as unassigned */
	mpt->m_smp_devhdl = 0xFFFF;

	return (TRUE);
}
2284
2285 static void
2286 mptsas_smp_teardown(mptsas_t *mpt)
2287 {
2288 (void) smp_hba_detach(mpt->m_dip);
2289 if (mpt->m_smptran != NULL) {
2290 smp_hba_tran_free(mpt->m_smptran);
2291 mpt->m_smptran = NULL;
2292 }
2293 mpt->m_smp_devhdl = 0;
2294 }
2295
2296 static int
2297 mptsas_cache_create(mptsas_t *mpt)
2298 {
2299 int instance = mpt->m_instance;
2300 char buf[64];
2301
2302 /*
2303 * create kmem cache for packets
2304 */
2305 (void) sprintf(buf, "mptsas%d_cache", instance);
2306 mpt->m_kmem_cache = kmem_cache_create(buf,
2307 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2308 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2309 NULL, (void *)mpt, NULL, 0);
2310
2311 if (mpt->m_kmem_cache == NULL) {
2312 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2313 return (FALSE);
2314 }
2315
2316 /*
2317 * create kmem cache for extra SGL frames if SGL cannot
2318 * be accomodated into main request frame.
2319 */
2320 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2321 mpt->m_cache_frames = kmem_cache_create(buf,
2322 sizeof (mptsas_cache_frames_t), 8,
2323 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2324 NULL, (void *)mpt, NULL, 0);
2325
2326 if (mpt->m_cache_frames == NULL) {
2327 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2328 return (FALSE);
2329 }
2330
2331 return (TRUE);
2332 }
2333
2334 static void
2335 mptsas_cache_destroy(mptsas_t *mpt)
2336 {
2337 /* deallocate in reverse order */
2338 if (mpt->m_cache_frames) {
2339 kmem_cache_destroy(mpt->m_cache_frames);
2340 mpt->m_cache_frames = NULL;
2341 }
2342 if (mpt->m_kmem_cache) {
2343 kmem_cache_destroy(mpt->m_kmem_cache);
2344 mpt->m_kmem_cache = NULL;
2345 }
2346 }
2347
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t *mpt;
	int rval = DDI_SUCCESS;
	int polls = 0;
	uint32_t ioc_status;

	/* Power management applies to the HBA node only, not iports */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset
		 * (3000 polls x 10ms delay).
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			/* Force a full reset, not a message unit reset */
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2419
2420 /*
2421 * Initialize configuration space and figure out which
2422 * chip and revison of the chip the mpt driver is using.
2423 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	/* Already mapped; nothing to do */
	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.  Clear a latched parity error, if
	 * any, by writing the PERROR bit back (write-one-to-clear).
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team.  This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	/* Cache PCI capability information; failure is not fatal here */
	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}
2478
2479 static void
2480 mptsas_config_space_fini(mptsas_t *mpt)
2481 {
2482 if (mpt->m_config_handle != NULL) {
2483 mptsas_disable_bus_master(mpt);
2484 pci_config_teardown(&mpt->m_config_handle);
2485 mpt->m_config_handle = NULL;
2486 }
2487 }
2488
2489 static void
2490 mptsas_setup_cmd_reg(mptsas_t *mpt)
2491 {
2492 ushort_t cmdreg;
2493
2494 /*
2495 * Set the command register to the needed values.
2496 */
2497 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2498 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2499 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2500 cmdreg &= ~PCI_COMM_IO;
2501 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2502 }
2503
2504 static void
2505 mptsas_disable_bus_master(mptsas_t *mpt)
2506 {
2507 ushort_t cmdreg;
2508
2509 /*
2510 * Clear the master enable bit in the PCI command register.
2511 * This prevents any bus mastering activity like DMA.
2512 */
2513 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2514 cmdreg &= ~PCI_COMM_ME;
2515 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2516 }
2517
2518 int
2519 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2520 {
2521 ddi_dma_attr_t attrs;
2522
2523 attrs = mpt->m_io_dma_attr;
2524 attrs.dma_attr_sgllen = 1;
2525
2526 ASSERT(dma_statep != NULL);
2527
2528 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2529 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2530 &dma_statep->cookie) == FALSE) {
2531 return (DDI_FAILURE);
2532 }
2533
2534 return (DDI_SUCCESS);
2535 }
2536
2537 void
2538 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2539 {
2540 ASSERT(dma_statep != NULL);
2541 mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2542 dma_statep->size = 0;
2543 }
2544
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t attrs;
	ddi_dma_handle_t dma_handle;
	caddr_t memp;
	ddi_acc_handle_t accessp;
	int rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	attrs.dma_attr_granular = size;

	/* Allocate a temporary DMA buffer for the duration of the call */
	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	/* Run the caller-supplied operation against the buffer */
	rval = (*callback) (mpt, memp, var, accessp);

	/* FMA: verify both handles before trusting the result */
	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}
2577
2578 static int
2579 mptsas_alloc_request_frames(mptsas_t *mpt)
2580 {
2581 ddi_dma_attr_t frame_dma_attrs;
2582 caddr_t memp;
2583 ddi_dma_cookie_t cookie;
2584 size_t mem_size;
2585
2586 /*
2587 * re-alloc when it has already alloced
2588 */
2589 if (mpt->m_dma_req_frame_hdl)
2590 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2591 &mpt->m_acc_req_frame_hdl);
2592
2593 /*
2594 * The size of the request frame pool is:
2595 * Number of Request Frames * Request Frame Size
2596 */
2597 mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2598
2599 /*
2600 * set the DMA attributes. System Request Message Frames must be
2601 * aligned on a 16-byte boundry.
2602 */
2603 frame_dma_attrs = mpt->m_msg_dma_attr;
2604 frame_dma_attrs.dma_attr_align = 16;
2605 frame_dma_attrs.dma_attr_sgllen = 1;
2606
2607 /*
2608 * allocate the request frame pool.
2609 */
2610 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2611 &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2612 mem_size, &cookie) == FALSE) {
2613 return (DDI_FAILURE);
2614 }
2615
2616 /*
2617 * Store the request frame memory address. This chip uses this
2618 * address to dma to and from the driver's frame. The second
2619 * address is the address mpt uses to fill in the frame.
2620 */
2621 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2622 mpt->m_req_frame = memp;
2623
2624 /*
2625 * Clear the request frame pool.
2626 */
2627 bzero(mpt->m_req_frame, mem_size);
2628
2629 return (DDI_SUCCESS);
2630 }
2631
static int
mptsas_alloc_sense_bufs(mptsas_t *mpt)
{
	ddi_dma_attr_t sense_dma_attrs;
	caddr_t memp;
	ddi_dma_cookie_t cookie;
	size_t mem_size;
	int num_extrqsense_bufs;

	/*
	 * re-alloc when it has already alloced; the rmalloc map over the
	 * extended-sense region must be freed along with the pool
	 */
	if (mpt->m_dma_req_sense_hdl) {
		rmfreemap(mpt->m_erqsense_map);
		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
		    &mpt->m_acc_req_sense_hdl);
	}

	/*
	 * The size of the request sense pool is:
	 * (Number of Request Frames - 2 ) * Request Sense Size +
	 * extra memory for extended sense requests.
	 */
	mem_size = ((mpt->m_max_requests - 2) * mpt->m_req_sense_size) +
	    mptsas_extreq_sense_bufsize;

	/*
	 * set the DMA attributes.  ARQ buffers
	 * aligned on a 16-byte boundary.
	 */
	sense_dma_attrs = mpt->m_msg_dma_attr;
	sense_dma_attrs.dma_attr_align = 16;
	sense_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request sense buffer pool.
	 */
	if (mptsas_dma_addr_create(mpt, sense_dma_attrs,
	    &mpt->m_dma_req_sense_hdl, &mpt->m_acc_req_sense_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request sense base memory address.  This chip uses this
	 * address to dma the request sense data.  The second
	 * address is the address mpt uses to access the data.
	 * The third is the base for the extended rqsense buffers.
	 */
	mpt->m_req_sense_dma_addr = cookie.dmac_laddress;
	mpt->m_req_sense = memp;
	memp += (mpt->m_max_requests - 2) * mpt->m_req_sense_size;
	mpt->m_extreq_sense = memp;

	/*
	 * The extra memory is divided up into multiples of the base
	 * buffer size in order to allocate via rmalloc().
	 * Note that the rmallocmap cannot start at zero!
	 * (hence the base offset of 1 passed to rmfree below)
	 */
	num_extrqsense_bufs = mptsas_extreq_sense_bufsize /
	    mpt->m_req_sense_size;
	mpt->m_erqsense_map = rmallocmap_wait(num_extrqsense_bufs);
	/* Seed the map: mark all buffers [1, n] as available */
	rmfree(mpt->m_erqsense_map, num_extrqsense_bufs, 1);

	/*
	 * Clear the pool.
	 */
	bzero(mpt->m_req_sense, mem_size);

	return (DDI_SUCCESS);
}
2703
static int
mptsas_alloc_reply_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t frame_dma_attrs;
	caddr_t memp;
	ddi_dma_cookie_t cookie;
	size_t mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	if (mpt->m_dma_reply_frame_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
		    &mpt->m_acc_reply_frame_hdl);
	}

	/*
	 * The size of the reply frame pool is:
	 * Number of Reply Frames * Reply Frame Size
	 */
	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;

	/*
	 * set the DMA attributes.  System Reply Message Frames must be
	 * aligned on a 4-byte boundary.  This is the default, so no
	 * explicit alignment override is needed here.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply frame pool
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to process the frame.
	 */
	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_reply_frame = memp;

	/*
	 * Clear the reply frame pool.
	 */
	bzero(mpt->m_reply_frame, mem_size);

	return (DDI_SUCCESS);
}
2757
static int
mptsas_alloc_free_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t frame_dma_attrs;
	caddr_t memp;
	ddi_dma_cookie_t cookie;
	size_t mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	if (mpt->m_dma_free_queue_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
		    &mpt->m_acc_free_queue_hdl);
	}

	/*
	 * The reply free queue size is:
	 * Reply Free Queue Depth * 4
	 * The "4" is the size of one 32 bit address (low part of 64-bit
	 * address)
	 */
	mem_size = mpt->m_free_queue_depth * 4;

	/*
	 * set the DMA attributes  The Reply Free Queue must be aligned on a
	 * 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply free queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply free queue memory address.  This chip uses this
	 * address to read from the reply free queue.  The second address
	 * is the address mpt uses to manage the queue.
	 */
	mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_free_queue = memp;

	/*
	 * Clear the reply free queue memory.
	 */
	bzero(mpt->m_free_queue, mem_size);

	return (DDI_SUCCESS);
}
2814
static int
mptsas_alloc_post_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t frame_dma_attrs;
	caddr_t memp;
	ddi_dma_cookie_t cookie;
	size_t mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	if (mpt->m_dma_post_queue_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
		    &mpt->m_acc_post_queue_hdl);
	}

	/*
	 * The reply descriptor post queue size is:
	 * Reply Descriptor Post Queue Depth * 8
	 * The "8" is the size of each descriptor (8 bytes or 64 bits).
	 */
	mem_size = mpt->m_post_queue_depth * 8;

	/*
	 * set the DMA attributes.  The Reply Descriptor Post Queue must be
	 * aligned on a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply post queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply descriptor post queue memory address.  This chip
	 * uses this address to write to the reply descriptor post queue.  The
	 * second address is the address mpt uses to manage the queue.
	 */
	mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_post_queue = memp;

	/*
	 * Clear the reply post queue memory.
	 */
	bzero(mpt->m_post_queue, mem_size);

	return (DDI_SUCCESS);
}
2870
2871 static void
2872 mptsas_alloc_reply_args(mptsas_t *mpt)
2873 {
2874 if (mpt->m_replyh_args == NULL) {
2875 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2876 mpt->m_max_replies, KM_SLEEP);
2877 }
2878 }
2879
2880 static int
2881 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2882 {
2883 mptsas_cache_frames_t *frames = NULL;
2884 if (cmd->cmd_extra_frames == NULL) {
2885 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2886 if (frames == NULL) {
2887 return (DDI_FAILURE);
2888 }
2889 cmd->cmd_extra_frames = frames;
2890 }
2891 return (DDI_SUCCESS);
2892 }
2893
2894 static void
2895 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2896 {
2897 if (cmd->cmd_extra_frames) {
2898 kmem_cache_free(mpt->m_cache_frames,
2899 (void *)cmd->cmd_extra_frames);
2900 cmd->cmd_extra_frames = NULL;
2901 }
2902 }
2903
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	/* Release the device register mapping established at attach time */
	ddi_regs_map_free(&mpt->m_datap);
}
2910
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Free up any allocated memory
	 */
	if (mpt->m_dma_req_frame_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
		    &mpt->m_acc_req_frame_hdl);
	}

	/* The sense pool also owns the extended-sense rmalloc map */
	if (mpt->m_dma_req_sense_hdl) {
		rmfreemap(mpt->m_erqsense_map);
		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
		    &mpt->m_acc_req_sense_hdl);
	}

	if (mpt->m_dma_reply_frame_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
		    &mpt->m_acc_reply_frame_hdl);
	}

	if (mpt->m_dma_free_queue_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
		    &mpt->m_acc_free_queue_hdl);
	}

	if (mpt->m_dma_post_queue_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
		    &mpt->m_acc_post_queue_hdl);
	}

	/* Allocated by mptsas_alloc_reply_args() */
	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}
2950
static int
mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
{
	int lun = 0;
	char *sas_wwn = NULL;
	int phynum = -1;
	int reallen = 0;

	/* Get the target num */
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
	    LUN_PROP, 0);

	if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
		/*
		 * Stick in the address of form "pPHY,LUN"
		 */
		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
	    == DDI_PROP_SUCCESS) {
		/*
		 * Stick in the address of the form "wWWN,LUN"
		 * (presumably the property string already carries the
		 * leading 'w' -- confirm against the property writer)
		 */
		reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
		ddi_prop_free(sas_wwn);
	} else {
		/* Neither a SATA phy nor a target-port WWN: cannot name */
		return (DDI_FAILURE);
	}

	/* Truncation is logged but deliberately not treated as failure */
	ASSERT(reallen < len);
	if (reallen >= len) {
		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
		    "length too small, it needs to be %d bytes", reallen + 1);
	}
	return (DDI_SUCCESS);
}
2988
2989 /*
2990 * tran_tgt_init(9E) - target device instance initialization
2991 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * Use this function to allocate target-private data structures,
	 * if needed by this HBA.  Add revised flow-control and queue
	 * properties for child here, if desired and if you can tell they
	 * support tagged queueing by now.
	 */
	mptsas_t *mpt;
	int lun = sd->sd_address.a_lun;
	mdi_pathinfo_t *pip = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	mptsas_target_t *ptgt = NULL;
	char *psas_wwn = NULL;
	mptsas_phymask_t phymask = 0;
	uint64_t sas_wwn = 0;
	mptsas_target_addr_t addr;
	mpt = SDEV2MPT(sd);

	/* Targets only ever attach below iports, never the HBA node */
	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	/*
	 * Reject non-persistent (.conf-created) nodes; merge their
	 * properties into the real devinfo node instead.
	 */
	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		/* Multipath (scsi_vhci) client: get LUN/WWN via mdi props */
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs.  Somehow scsi_vhci has
			 * lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				/* Unparseable WWN string: fall back to 0 */
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/* Direct child: get LUN/WWN from devinfo properties */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}

	/* Either a real WWN or the RAID virtual port must identify us */
	ASSERT((sas_wwn != 0) || (phymask != 0));
	addr.mta_wwn = sas_wwn;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	ptgt = refhash_lookup(mpt->m_targets, &addr);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	/* Attach per-target private data (LUN + target pointer) */
	if (hba_tran->tran_tgt_private == NULL) {
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	/* For mpxio clients scsi_vhci handles inquiry properties */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;
		int i;

		/* Drop m_mutex across the blocking inquiry below */
		mutex_exit(&mpt->m_mutex);
		/*
		 * According to SCSI/ATA Translation-2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			/* Inquiry failure is non-fatal for tgt_init */
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			return (DDI_SUCCESS);
		}
		/* IDENTIFY data starts at byte 60 of the VPD 89h page */
		sid = (void *)(&inq89[60]);

		/* ATA identify strings are byte-swapped; swab fixes them */
		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		/*
		 * split model into vid/pid at the first whitespace
		 */
		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
			if ((*pid == ' ') || (*pid == '\t'))
				break;
		if (i < SATA_ID_MODEL_LEN) {
			vid = model;
			/*
			 * terminate vid, establish pid
			 */
			*pid++ = 0;
		} else {
			/*
			 * vid will stay "ATA     ", the rule is same
			 * as sata framework implementation.
			 */
			vid = NULL;
			/*
			 * model is all pid
			 */
			pid = model;
		}

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
3182 /*
3183 * tran_tgt_free(9E) - target device instance deallocation
3184 */
3185 static void
3186 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3187 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3188 {
3189 #ifndef __lock_lint
3190 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
3191 #endif
3192
3193 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
3194
3195 if (tgt_private != NULL) {
3196 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
3197 hba_tran->tran_tgt_private = NULL;
3198 }
3199 }
3200
3201 /*
3202 * scsi_pkt handling
3203 *
3204 * Visible to the external world via the transport structure.
3205 */
3206
3207 /*
3208 * Notes:
3209 * - transport the command to the addressed SCSI target/lun device
3210 * - normal operation is to schedule the command to be transported,
3211 * and return TRAN_ACCEPT if this is successful.
3212 * - if NO_INTR, tran_start must poll device for command completion
3213 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by another thread, don't spin to wait
	 * for it. Instead, queue the cmd and the next time the instance lock
	 * is not held, accept all the queued cmds. An extra tx_waitq is
	 * introduced to protect the queue.
	 *
	 * A polled cmd will not be queued and is accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq. An IO requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and while holding the
	 * tx_wait mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex. This scheme ensures that
	 * the last cmd in a burst will be processed.
	 *
	 * We enable this feature only when the helper threads are enabled,
	 * at which point we think the load is heavy.
	 *
	 * The per-instance mutex m_tx_waitq_mutex is introduced to protect
	 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
	 */

	if (mpt->m_doneq_thread_n) {
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			/* Instance lock acquired without blocking. */
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/*
			 * Polled commands must not be queued; block until
			 * the instance lock is available.
			 */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in an unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/*
				 * Another thread is already draining the
				 * queue; just append and let it pick us up.
				 */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* drain the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		/* No helper threads: take the instance lock directly. */
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in an unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3356
3357 /*
3358 * Accept all the queued cmds(if any) before accept the current one.
3359 */
static int
mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(mutex_owned(&mpt->m_mutex));
	/*
	 * The call to mptsas_accept_tx_waitq() must always be performed
	 * because that is where mpt->m_tx_draining is cleared.
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	mptsas_accept_tx_waitq(mpt);
	mutex_exit(&mpt->m_tx_waitq_mutex);
	/*
	 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
	 * in this case, m_mutex is acquired.
	 */
	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
			/*
			 * The command should be allowed to retry by returning
			 * TRAN_BUSY to stall the I/O's which come from
			 * scsi_vhci since the device/path is in an unstable
			 * state now.
			 */
			return (TRAN_BUSY);
		} else {
			/*
			 * The device is offline, just fail the command by
			 * returning TRAN_FATAL_ERROR.
			 */
			return (TRAN_FATAL_ERROR);
		}
	}
	/* Queue drained; now accept the caller's command. */
	rval = mptsas_accept_pkt(mpt, cmd);

	return (rval);
}
3399
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Commands that arrive via the tx_waitq may not have gone through
	 * mptsas_prepare_pkt() yet; do it now if needed.
	 */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached. Check if being reset and if DevHandle is being
	 * re-initialized. If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/*
			 * tx_waitq commands have already been "accepted";
			 * complete them through the done queue instead of
			 * returning a transport error.
			 */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If the device handle has already been invalidated, just fail the
	 * command. In theory, a command from a scsi_vhci client should never
	 * be sent down with an invalid devhdl, since the devhdl is
	 * invalidated after path offline and the target driver is not
	 * supposed to select an offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG3(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case. mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* No free request slot; wait for one. */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd. For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3503
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t	*slots = mpt->m_active;
	uint_t		slot, start_rotor;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));

	/*
	 * Find the next available slot, beginning at m_rotor. If no slot is
	 * available, we'll return FALSE to indicate that. This mechanism
	 * considers only the normal slots, not the reserved slot 0 nor the
	 * task management slot m_n_normal + 1. The rotor is left to point to
	 * the normal slot after the one we select, unless we select the last
	 * normal slot in which case it returns to slot 1.
	 */
	start_rotor = slots->m_rotor;
	do {
		slot = slots->m_rotor++;
		if (slots->m_rotor > slots->m_n_normal)
			slots->m_rotor = 1;

		/* Rotor has wrapped all the way around: every slot seen. */
		if (slots->m_rotor == start_rotor)
			break;
	} while (slots->m_slot[slot] != NULL);

	/* The loop may exit via wrap-around with the last slot occupied. */
	if (slots->m_slot[slot] != NULL)
		return (FALSE);

	ASSERT(slot != 0 && slot <= slots->m_n_normal);

	cmd->cmd_slot = slot;
	slots->m_slot[slot] = cmd;
	mpt->m_ncmds++;

	/*
	 * only increment per target ncmds if this is not a
	 * command that has no target associated with it (i.e. an
	 * event acknowledgment)
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		/*
		 * Expiration time is set in mptsas_start_cmd
		 */
		ptgt->m_t_ncmds++;
		cmd->cmd_active_expiration = 0;
	} else {
		/*
		 * Initialize expiration time for passthrough commands,
		 */
		cmd->cmd_active_expiration = gethrtime() +
		    (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
	}
	return (TRUE);
}
3565
3566 /*
3567 * prepare the pkt:
3568 * the pkt may have been resubmitted or just reused so
3569 * initialize some fields and do some checks.
3570 */
3571 static int
3572 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3573 {
3574 struct scsi_pkt *pkt = CMD2PKT(cmd);
3575
3576 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3577
3578 /*
3579 * Reinitialize some fields that need it; the packet may
3580 * have been resubmitted
3581 */
3582 pkt->pkt_reason = CMD_CMPLT;
3583 pkt->pkt_state = 0;
3584 pkt->pkt_statistics = 0;
3585 pkt->pkt_resid = 0;
3586 cmd->cmd_age = 0;
3587 cmd->cmd_pkt_flags = pkt->pkt_flags;
3588
3589 /*
3590 * zero status byte.
3591 */
3592 *(pkt->pkt_scbp) = 0;
3593
3594 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3595 pkt->pkt_resid = cmd->cmd_dmacount;
3596
3597 /*
3598 * consistent packets need to be sync'ed first
3599 * (only for data going out)
3600 */
3601 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3602 (cmd->cmd_flags & CFLAG_DMASEND)) {
3603 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3604 DDI_DMA_SYNC_FORDEV);
3605 }
3606 }
3607
3608 cmd->cmd_flags =
3609 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3610 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3611
3612 return (TRAN_ACCEPT);
3613 }
3614
3615 /*
3616 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3617 *
3618 * One of three possibilities:
3619 * - allocate scsi_pkt
3620 * - allocate scsi_pkt and DMA resources
3621 * - allocate DMA resources to an already-allocated pkt
3622 */
static struct scsi_pkt *
mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	mptsas_cmd_t		*cmd, *new_cmd;
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			failure = 1;
	uint_t			oldcookiec;
	mptsas_target_t		*ptgt = NULL;
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			kf;

	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	if (tgt_private == NULL) {
		return (NULL);
	}
	ptgt = tgt_private->t_private;
	ASSERT(ptgt != NULL);
	if (ptgt == NULL)
		return (NULL);
	/* Refresh the address with the current devhdl/lun for this target. */
	ap->a_target = ptgt->m_devhdl;
	ap->a_lun = tgt_private->t_lun;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
#ifdef MPTSAS_TEST_EXTRN_ALLOC
	statuslen *= 100; tgtlen *= 4;
#endif
	NDBG3(("mptsas_scsi_init_pkt:\n"
	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
	    ap->a_target, (void *)pkt, (void *)bp,
	    cmdlen, statuslen, tgtlen, flags));

	/*
	 * Allocate the new packet.
	 */
	if (pkt == NULL) {
		ddi_dma_handle_t	save_dma_handle;

		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);

		if (cmd) {
			/*
			 * The cached cmd carries a pre-allocated DMA handle;
			 * preserve it across the bzero of cmd + pkt.
			 */
			save_dma_handle = cmd->cmd_dmahandle;
			bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
			cmd->cmd_dmahandle = save_dma_handle;

			/* The scsi_pkt is laid out directly after the cmd. */
			pkt = (void *)((uchar_t *)cmd +
			    sizeof (struct mptsas_cmd));
			pkt->pkt_ha_private = (opaque_t)cmd;
			pkt->pkt_address = *ap;
			pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
			pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
			pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
			cmd->cmd_pkt = (struct scsi_pkt *)pkt;
			cmd->cmd_cdblen = (uchar_t)cmdlen;
			cmd->cmd_scblen = statuslen;
			cmd->cmd_rqslen = SENSE_LENGTH;
			cmd->cmd_tgt_addr = ptgt;
			failure = 0;
		}

		/*
		 * Oversized CDB, private or status areas require external
		 * allocations outside the fixed-size cached cmd.
		 */
		if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			if (failure == 0) {
				/*
				 * if extern alloc fails, all will be
				 * deallocated, including cmd
				 */
				failure = mptsas_pkt_alloc_extern(mpt, cmd,
				    cmdlen, tgtlen, statuslen, kf);
			}
			if (failure) {
				/*
				 * if extern allocation fails, it will
				 * deallocate the new pkt as well
				 */
				return (NULL);
			}
		}
		new_cmd = cmd;

	} else {
		/* Caller supplied an existing pkt; just recover the cmd. */
		cmd = PKT2CMD(pkt);
		new_cmd = NULL;
	}


	/* grab cmd->cmd_cookiec here as oldcookiec */

	oldcookiec = cmd->cmd_cookiec;

	/*
	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
	 * greater than 0 and we'll need to grab the next dma window
	 */
	/*
	 * SLM-not doing extra command frame right now; may add later
	 */

	if (cmd->cmd_nwin > 0) {

		/*
		 * Make sure we haven't gone past the total number
		 * of windows
		 */
		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
			return (NULL);
		}
		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
			return (NULL);
		}
		goto get_dma_cookies;
	}


	if (flags & PKT_XARQ) {
		cmd->cmd_flags |= CFLAG_XARQ;
	}

	/*
	 * DMA resource allocation. This version assumes your
	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
	 */
	if (bp && (bp->b_bcount != 0) &&
	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {

		int	cnt, dma_flags;
		mptti_t	*dmap;		/* ptr to the S/G list */

		/*
		 * Set up DMA memory and position to the next DMA segment.
		 */
		ASSERT(cmd->cmd_dmahandle != NULL);

		if (bp->b_flags & B_READ) {
			dma_flags = DDI_DMA_READ;
			cmd->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			dma_flags = DDI_DMA_WRITE;
			cmd->cmd_flags |= CFLAG_DMASEND;
		}
		if (flags & PKT_CONSISTENT) {
			cmd->cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		if (flags & PKT_DMA_PARTIAL) {
			dma_flags |= DDI_DMA_PARTIAL;
		}

		/*
		 * workaround for byte hole issue on psycho and
		 * schizo pre 2.1
		 */
		if ((bp->b_flags & B_READ) && ((bp->b_flags &
		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
		    dma_flags, callback, arg,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
		if (rval == DDI_DMA_PARTIAL_MAP) {
			/* Buffer mapped partially; set up the first window. */
			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
			    &cmd->cmd_cookiec);
		} else if (rval && (rval != DDI_DMA_MAPPED)) {
			/* Bind failed; map the DDI error to a bio errno. */
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->cmd_flags &= ~CFLAG_DMAVALID;
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

	get_dma_cookies:
		cmd->cmd_flags |= CFLAG_DMAVALID;
		ASSERT(cmd->cmd_cookiec > 0);

		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
			    cmd->cmd_cookiec);
			bioerror(bp, EINVAL);
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

		/*
		 * Allocate extra SGL buffer if needed.
		 */
		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
		    (cmd->cmd_extra_frames == NULL)) {
			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
			    DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
				    "failed");
				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		/*
		 * Always use scatter-gather transfer
		 * Use the loop below to store physical addresses of
		 * DMA segments, from the DMA cookies, into your HBA's
		 * scatter-gather list.
		 * We need to ensure we have enough kmem alloc'd
		 * for the sg entries since we are no longer using an
		 * array inside mptsas_cmd_t.
		 *
		 * We check cmd->cmd_cookiec against oldcookiec so
		 * the scatter-gather list is correctly allocated
		 */

		if (oldcookiec != cmd->cmd_cookiec) {
			/* Cookie count changed; the old list is unusable. */
			if (cmd->cmd_sg != (mptti_t *)NULL) {
				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
				    oldcookiec);
				cmd->cmd_sg = NULL;
			}
		}

		if (cmd->cmd_sg == (mptti_t *)NULL) {
			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
			    cmd->cmd_cookiec), kf);

			if (cmd->cmd_sg == (mptti_t *)NULL) {
				mptsas_log(mpt, CE_WARN,
				    "unable to kmem_alloc enough memory "
				    "for scatter/gather list");
		/*
		 * if we have an ENOMEM condition we need to behave
		 * the same way as the rest of this routine
		 */

				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		dmap = cmd->cmd_sg;

		ASSERT(cmd->cmd_cookie.dmac_size != 0);

		/*
		 * store the first segment into the S/G list
		 */
		dmap->count = cmd->cmd_cookie.dmac_size;
		dmap->addr.address64.Low = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
		dmap->addr.address64.High = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress >> 32);

		/*
		 * dmacount counts the size of the dma for this window
		 * (if partial dma is being used). totaldmacount
		 * keeps track of the total amount of dma we have
		 * transferred for all the windows (needed to calculate
		 * the resid value below).
		 */
		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

		/*
		 * We already stored the first DMA scatter gather segment,
		 * start at 1 if we need to store more.
		 */
		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
			/*
			 * Get next DMA cookie
			 */
			ddi_dma_nextcookie(cmd->cmd_dmahandle,
			    &cmd->cmd_cookie);
			dmap++;

			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

			/*
			 * store the segment parms into the S/G list
			 */
			dmap->count = cmd->cmd_cookie.dmac_size;
			dmap->addr.address64.Low = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
			dmap->addr.address64.High = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress >> 32);
		}

		/*
		 * If this was partially allocated we set the resid
		 * the amount of data NOT transferred in this window
		 * If there is only one window, the resid will be 0
		 */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
		NDBG3(("mptsas_scsi_init_pkt: cmd_dmacount=%d.",
		    cmd->cmd_dmacount));
	}
	return (pkt);
}
3957
3958 /*
3959 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3960 *
3961 * Notes:
3962 * - also frees DMA resources if allocated
3963 * - implicit DMA synchonization
3964 */
static void
mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
	    ap->a_target, (void *)pkt));

	/* Release any live DMA binding before freeing the command. */
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
		cmd->cmd_flags &= ~CFLAG_DMAVALID;
	}

	/* Free the scatter/gather list sized by the current cookie count. */
	if (cmd->cmd_sg) {
		kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
		cmd->cmd_sg = NULL;
	}

	mptsas_free_extra_sgl_frame(mpt, cmd);

	/*
	 * A command with no external allocations goes straight back to the
	 * kmem cache; otherwise the external CDB/status/private areas must
	 * be torn down first by mptsas_pkt_destroy_extern().
	 */
	if ((cmd->cmd_flags &
	    (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
	    CFLAG_SCBEXTERN)) == 0) {
		cmd->cmd_flags = CFLAG_FREE;
		kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
	} else {
		mptsas_pkt_destroy_extern(mpt, cmd);
	}
}
3995
3996 /*
3997 * kmem cache constructor and destructor:
3998 * When constructing, we bzero the cmd and allocate the dma handle
3999 * When destructing, just free the dma handle
4000 */
4001 static int
4002 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
4003 {
4004 mptsas_cmd_t *cmd = buf;
4005 mptsas_t *mpt = cdrarg;
4006 int (*callback)(caddr_t);
4007
4008 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4009
4010 NDBG4(("mptsas_kmem_cache_constructor"));
4011
4012 /*
4013 * allocate a dma handle
4014 */
4015 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
4016 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
4017 cmd->cmd_dmahandle = NULL;
4018 return (-1);
4019 }
4020 return (0);
4021 }
4022
4023 static void
4024 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
4025 {
4026 #ifndef __lock_lint
4027 _NOTE(ARGUNUSED(cdrarg))
4028 #endif
4029 mptsas_cmd_t *cmd = buf;
4030
4031 NDBG4(("mptsas_kmem_cache_destructor"));
4032
4033 if (cmd->cmd_dmahandle) {
4034 ddi_dma_free_handle(&cmd->cmd_dmahandle);
4035 cmd->cmd_dmahandle = NULL;
4036 }
4037 }
4038
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t	*p = buf;
	mptsas_t		*mpt = cdrarg;
	ddi_dma_attr_t		frame_dma_attr;
	size_t			mem_size, alloc_len;
	ddi_dma_cookie_t	cookie;
	uint_t			ncookie;
	int			(*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/*
	 * The extra SGL frames must be physically contiguous (sgllen 1)
	 * and 16-byte aligned; derive that from the message DMA attrs.
	 */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	/* One frame's worth of SGEs lives in the main request frame. */
	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		/* Roll back the handle allocated above. */
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		/* Roll back both the memory and the handle. */
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address. This chip uses this
	 * address to dma to and from the driver. The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_laddress;

	return (DDI_SUCCESS);
}
4094
4095 static void
4096 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
4097 {
4098 #ifndef __lock_lint
4099 _NOTE(ARGUNUSED(cdrarg))
4100 #endif
4101 mptsas_cache_frames_t *p = buf;
4102 if (p->m_dma_hdl != NULL) {
4103 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
4104 (void) ddi_dma_mem_free(&p->m_acc_hdl);
4105 ddi_dma_free_handle(&p->m_dma_hdl);
4106 p->m_phys_addr = NULL;
4107 p->m_frames_addr = NULL;
4108 p->m_dma_hdl = NULL;
4109 p->m_acc_hdl = NULL;
4110 }
4111
4112 }
4113
4114 /*
4115 * Figure out if we need to use a different method for the request
4116 * sense buffer and allocate from the map if necessary.
4117 */
static boolean_t
mptsas_cmdarqsize(mptsas_t *mpt, mptsas_cmd_t *cmd, size_t senselength, int kf)
{
	/*
	 * Requests that fit in the default per-command sense buffer just
	 * record the length; larger ones carve chunks out of the shared
	 * extended-sense region via the m_erqsense_map resource map.
	 */
	if (senselength > mpt->m_req_sense_size) {
		unsigned long i;

		/* Sense length is limited to an 8 bit value in MPI Spec. */
		if (senselength > 255)
			senselength = 255;
		/* Round up to whole m_req_sense_size chunks. */
		cmd->cmd_extrqschunks = (senselength +
		    (mpt->m_req_sense_size - 1))/mpt->m_req_sense_size;
		i = (kf == KM_SLEEP ? rmalloc_wait : rmalloc)
		    (mpt->m_erqsense_map, cmd->cmd_extrqschunks);

		/* rmalloc returns 0 when the map is exhausted. */
		if (i == 0)
			return (B_FALSE);

		/*
		 * The map is 1-based; store the 0-based chunk index and
		 * compute the buffer address within m_extreq_sense.
		 */
		cmd->cmd_extrqslen = (uint16_t)senselength;
		cmd->cmd_extrqsidx = i - 1;
		cmd->cmd_arq_buf = mpt->m_extreq_sense +
		    (cmd->cmd_extrqsidx * mpt->m_req_sense_size);
	} else {
		cmd->cmd_rqslen = (uchar_t)senselength;
	}

	return (B_TRUE);
}
4145
4146 /*
4147 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
4148 * for non-standard length cdb, pkt_private, status areas
4149 * if allocation fails, then deallocate all external space and the pkt
4150 */
4151 /* ARGSUSED */
static int
mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
    int cmdlen, int tgtlen, int statuslen, int kf)
{
	caddr_t	cdbp, scbp, tgt;

	NDBG3(("mptsas_pkt_alloc_extern: "
	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));

	tgt = cdbp = scbp = NULL;
	cmd->cmd_scblen = statuslen;
	cmd->cmd_privlen = (uchar_t)tgtlen;

	/* External CDB when it exceeds the embedded cmd_cdb array. */
	if (cmdlen > sizeof (cmd->cmd_cdb)) {
		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
		cmd->cmd_flags |= CFLAG_CDBEXTERN;
	}
	/* External target-private area when larger than PKT_PRIV_LEN. */
	if (tgtlen > PKT_PRIV_LEN) {
		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
		cmd->cmd_pkt->pkt_private = tgt;
	}
	/* External status area, plus a DMA-able sense buffer. */
	if (statuslen > EXTCMDS_STATUS_SIZE) {
		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_SCBEXTERN;
		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;

		/* allocate sense data buf for DMA */
		if (mptsas_cmdarqsize(mpt, cmd, statuslen -
		    MPTSAS_GET_ITEM_OFF(struct scsi_arq_status, sts_sensedata),
		    kf) == B_FALSE)
			goto fail;
	}
	return (0);
fail:
	/* Unwinds every allocation above and frees the cmd itself. */
	mptsas_pkt_destroy_extern(mpt, cmd);
	return (1);
}
4198
4199 /*
4200 * deallocate external pkt space and deallocate the pkt
4201 */
static void
mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));

	/* Freeing an already-freed command is a fatal driver bug. */
	if (cmd->cmd_flags & CFLAG_FREE) {
		mptsas_log(mpt, CE_PANIC,
		    "mptsas_pkt_destroy_extern: freeing free packet");
		_NOTE(NOT_REACHED)
		/* NOTREACHED */
	}
	/*
	 * Return extended-sense chunks to the resource map; the +1
	 * mirrors the i - 1 stored by mptsas_cmdarqsize().
	 */
	if (cmd->cmd_extrqslen != 0) {
		rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
		    cmd->cmd_extrqsidx + 1);
	}
	if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
	}
	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
	}
	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
	}
	/* Mark free and return the cmd (and embedded pkt) to the cache. */
	cmd->cmd_flags = CFLAG_FREE;
	kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
}
4229
4230 /*
4231 * tran_sync_pkt(9E) - explicit DMA synchronization
4232 */
4233 /*ARGSUSED*/
4234 static void
4235 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4236 {
4237 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4238
4239 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4240 ap->a_target, (void *)pkt));
4241
4242 if (cmd->cmd_dmahandle) {
4243 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4244 (cmd->cmd_flags & CFLAG_DMASEND) ?
4245 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4246 }
4247 }
4248
4249 /*
4250 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4251 */
4252 /*ARGSUSED*/
4253 static void
4254 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4255 {
4256 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4257 mptsas_t *mpt = ADDR2MPT(ap);
4258
4259 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4260 ap->a_target, (void *)pkt));
4261
4262 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4263 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4264 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4265 }
4266
4267 mptsas_free_extra_sgl_frame(mpt, cmd);
4268 }
4269
4270 static void
4271 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4272 {
4273 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4274 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4275 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4276 DDI_DMA_SYNC_FORCPU);
4277 }
4278 (*pkt->pkt_comp)(pkt);
4279 }
4280
/*
 * Fill the SGL area of the main request frame with MPI2 64-bit simple
 * SGE elements, one per DMA cookie starting at cmd->cmd_sg.  'cookiec'
 * is the number of cookies to place and 'end_flags' is OR-ed into the
 * flags of the final element to terminate the list.
 */
static void
mptsas_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
    ddi_acc_handle_t acc_hdl, uint_t cookiec, uint32_t end_flags)
{
	pMpi2SGESimple64_t	sge;
	mptti_t			*dmap;
	uint32_t		flags;

	dmap = cmd->cmd_sg;

	sge = (pMpi2SGESimple64_t)(&frame->SGL);
	while (cookiec--) {
		ddi_put32(acc_hdl,
		    &sge->Address.Low, dmap->addr.address64.Low);
		ddi_put32(acc_hdl,
		    &sge->Address.High, dmap->addr.address64.High);
		/* Low bits of FlagsLength carry the segment byte count. */
		ddi_put32(acc_hdl, &sge->FlagsLength,
		    dmap->count);
		flags = ddi_get32(acc_hdl, &sge->FlagsLength);
		flags |= ((uint32_t)
		    (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);

		/*
		 * If this is the last cookie, we set the flags
		 * to indicate so
		 */
		if (cookiec == 0) {
			flags |= end_flags;
		}
		/* Direction: host-to-IOC for writes, IOC-to-host for reads. */
		if (cmd->cmd_flags & CFLAG_DMASEND) {
			flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
			    MPI2_SGE_FLAGS_SHIFT);
		} else {
			flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
			    MPI2_SGE_FLAGS_SHIFT);
		}
		ddi_put32(acc_hdl, &sge->FlagsLength, flags);
		dmap++;
		sge++;
	}
}
4325
/*
 * Build an MPI2 64-bit SGE list that does not fit in the main request
 * frame: fill the main frame, then describe the remaining cookies in
 * one or more chain frames (cmd->cmd_extra_frames) linked together by
 * chain elements.
 */
static void
mptsas_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2SGESimple64_t	sge;
	pMpi2SGEChain64_t	sgechain;
	uint64_t		nframe_phys_addr;
	uint_t			cookiec;
	mptti_t			*dmap;
	uint32_t		flags;

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    3 double-words (12 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Some restrictions:
	 * 1. For 64-bit DMA, the simple element and chain element
	 *    are both of 3 double-words (12 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 * 2. On some controllers (like the 1064/1068), a frame can
	 *    hold SGL elements with the last 1 or 2 double-words
	 *    (4 or 8 bytes) un-used. On these controllers, we should
	 *    recognize that there's not enough room for another SGL
	 *    element and move the sge pointer to the next frame.
	 */
	int		i, j, k, l, frames, sgemax;
	int		temp;
	uint8_t		chainflags;
	uint16_t	chainlength;
	mptsas_cache_frames_t *p;

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reserved for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
	    - 1);
	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
	    sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	dmap = cmd->cmd_sg;
	sge = (pMpi2SGESimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
	mptsas_sge_mainframe(cmd, frame, acc_hdl, j,
	    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT));
	dmap += j;
	sge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of double-words (4
	 *    bytes) so the last value should be divided by 4.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
	sgechain = (pMpi2SGEChain64_t)sge;
	chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
	ddi_put8(acc_hdl, &sgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		ASSERT(mpt->m_req_frame_size >= sizeof (MPI2_SGE_SIMPLE64));
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_SGE_SIMPLE64) *
		    sizeof (MPI2_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_SGE_SIMPLE64));
	}

	p = cmd->cmd_extra_frames;

	ddi_put16(acc_hdl, &sgechain->Length, chainlength);
	ddi_put32(acc_hdl, &sgechain->Address.Low, p->m_phys_addr);
	ddi_put32(acc_hdl, &sgechain->Address.High, p->m_phys_addr >> 32);

	/*
	 * If there are at least 2 frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame.  Note that the value NextChainOffset should be
	 * in double-words (4 bytes).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &sgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
	} else {
		ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	sge = (pMpi2SGESimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				sgechain = (pMpi2SGEChain64_t)sge;
				j--;
				chainflags = (
				    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
				ddi_put8(p->m_acc_hdl,
				    &sgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				nframe_phys_addr = p->m_phys_addr +
				    (mpt->m_req_frame_size * k);
				ddi_put32(p->m_acc_hdl,
				    &sgechain->Address.Low,
				    nframe_phys_addr);
				ddi_put32(p->m_acc_hdl,
				    &sgechain->Address.High,
				    nframe_phys_addr >> 32);

				/*
				 * If there are at least 2 frames left
				 * we have to set the next chain offset to
				 * the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &sgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_SGE_SIMPLE64))
					    >> 2);
					ddi_put16(p->m_acc_hdl,
					    &sgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_SGE_SIMPLE64) *
					    sizeof (MPI2_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &sgechain->NextChainOffset,
					    0);
					ddi_put16(p->m_acc_hdl,
					    &sgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				sge = (pMpi2SGESimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			ddi_put32(p->m_acc_hdl,
			    &sge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &sge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &sge->FlagsLength, dmap->count);
			flags = ddi_get32(p->m_acc_hdl,
			    &sge->FlagsLength);
			flags |= ((uint32_t)(
			    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * we set the last simple element as last
			 * element
			 */
			if ((l == sgemax) && (k != frames)) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}

			/*
			 * If this is the final cookie we
			 * indicate it by setting the flags
			 */
			if (j == i) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT |
				    MPI2_SGE_FLAGS_END_OF_BUFFER |
				    MPI2_SGE_FLAGS_END_OF_LIST) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |=
				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |=
				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(p->m_acc_hdl,
			    &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
4626
/*
 * Fill the SGL area of the main request frame with MPI2.5 IEEE 64-bit
 * simple SGE elements, one per DMA cookie starting at cmd->cmd_sg.
 * 'cookiec' is the number of cookies to place and 'end_flag' is OR-ed
 * into the flags byte of the final element (0 when a chain follows).
 */
static void
mptsas_ieee_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
    ddi_acc_handle_t acc_hdl, uint_t cookiec, uint8_t end_flag)
{
	pMpi2IeeeSgeSimple64_t	ieeesge;
	mptti_t			*dmap;
	uint8_t			flags;

	dmap = cmd->cmd_sg;

	NDBG1(("mptsas_ieee_sge_mainframe: cookiec=%d, %s", cookiec,
	    cmd->cmd_flags & CFLAG_DMASEND?"Out":"In"));

	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
	while (cookiec--) {
		ddi_put32(acc_hdl,
		    &ieeesge->Address.Low, dmap->addr.address64.Low);
		ddi_put32(acc_hdl,
		    &ieeesge->Address.High, dmap->addr.address64.High);
		/* IEEE SGEs carry the byte count in a dedicated field. */
		ddi_put32(acc_hdl, &ieeesge->Length,
		    dmap->count);
		NDBG1(("mptsas_ieee_sge_mainframe: len=%d", dmap->count));
		flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);

		/*
		 * If this is the last cookie, we set the flags
		 * to indicate so
		 */
		if (cookiec == 0) {
			flags |= end_flag;
		}

		ddi_put8(acc_hdl, &ieeesge->Flags, flags);
		dmap++;
		ieeesge++;
	}
}
4665
/*
 * Build an MPI2.5 IEEE 64-bit SGE list that does not fit in the main
 * request frame: fill the main frame, then describe the remaining
 * cookies in one or more chain frames (cmd->cmd_extra_frames) linked
 * together by IEEE chain elements.
 */
static void
mptsas_ieee_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2IeeeSgeSimple64_t	ieeesge;
	pMpi25IeeeSgeChain64_t	ieeesgechain;
	uint64_t		nframe_phys_addr;
	uint_t			cookiec;
	mptti_t			*dmap;
	uint8_t			flags;

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_ieee_sge_chain: cookiec=%d", cookiec));

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    4 double-words (16 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Restrictions:
	 *    For 64-bit DMA, the simple element and chain element
	 *    are both of 4 double-words (16 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 */
	int		i, j, k, l, frames, sgemax;
	int		temp;
	uint8_t		chainflags;
	uint32_t	chainlength;
	mptsas_cache_frames_t *p;

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reserved for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_IEEE_SGE_SIMPLE64))
	    - 1);
	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
	    sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	NDBG1(("mptsas_ieee_sge_chain: temp=%d, frames=%d", temp, frames));
	dmap = cmd->cmd_sg;
	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
	mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl, j, 0);
	dmap += j;
	ieeesge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of quad-words (16
	 *    bytes) so the last value should be divided by 16.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
	chainflags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
	ddi_put8(acc_hdl, &ieeesgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		ASSERT(mpt->m_req_frame_size >=
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
	}

	p = cmd->cmd_extra_frames;

	ddi_put32(acc_hdl, &ieeesgechain->Length, chainlength);
	ddi_put32(acc_hdl, &ieeesgechain->Address.Low, p->m_phys_addr);
	ddi_put32(acc_hdl, &ieeesgechain->Address.High, p->m_phys_addr >> 32);

	/*
	 * If there are at least 2 frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame.  Note that for IEEE SGEs the value NextChainOffset
	 * is in units of 16 bytes (hence the >> 4 below).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	} else {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	ieeesge = (pMpi2IeeeSgeSimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
				j--;
				chainflags =
				    MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
				ddi_put8(p->m_acc_hdl,
				    &ieeesgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				nframe_phys_addr = p->m_phys_addr +
				    (mpt->m_req_frame_size * k);
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.Low,
				    nframe_phys_addr);
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.High,
				    nframe_phys_addr >> 32);

				/*
				 * If there are at least 2 frames left
				 * we have to set the next chain offset to
				 * the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64))
					    >> 4);
					ASSERT(mpt->m_req_frame_size >=
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    0);
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				ieeesge = (pMpi2IeeeSgeSimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Length, dmap->count);
			flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * do we need to do anything?
			 * if ((l == sgemax) && (k != frames)) {
			 * }
			 */

			/*
			 * If this is the final cookie set end of list.
			 */
			if (j == i) {
				flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
			}

			ddi_put8(p->m_acc_hdl, &ieeesge->Flags, flags);
			dmap++;
			ieeesge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
4940
4941 static void
4942 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4943 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4944 {
4945 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4946
4947 NDBG1(("mptsas_sge_setup: cookiec=%d", cmd->cmd_cookiec));
4948
4949 /*
4950 * Set read/write bit in control.
4951 */
4952 if (cmd->cmd_flags & CFLAG_DMASEND) {
4953 *control |= MPI2_SCSIIO_CONTROL_WRITE;
4954 } else {
4955 *control |= MPI2_SCSIIO_CONTROL_READ;
4956 }
4957
4958 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4959
4960 /*
4961 * We have 4 cases here. First where we can fit all the
4962 * SG elements into the main frame, and the case
4963 * where we can't. The SG element is also different when using
4964 * MPI2.5 interface.
4965 * If we have more cookies than we can attach to a frame
4966 * we will need to use a chain element to point
4967 * a location of memory where the rest of the S/G
4968 * elements reside.
4969 */
4970 if (cmd->cmd_cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4971 if (mpt->m_MPI25) {
4972 mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl,
4973 cmd->cmd_cookiec,
4974 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
4975 } else {
4976 mptsas_sge_mainframe(cmd, frame, acc_hdl,
4977 cmd->cmd_cookiec,
4978 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4979 | MPI2_SGE_FLAGS_END_OF_BUFFER
4980 | MPI2_SGE_FLAGS_END_OF_LIST) <<
4981 MPI2_SGE_FLAGS_SHIFT));
4982 }
4983 } else {
4984 if (mpt->m_MPI25) {
4985 mptsas_ieee_sge_chain(mpt, cmd, frame, acc_hdl);
4986 } else {
4987 mptsas_sge_chain(mpt, cmd, frame, acc_hdl);
4988 }
4989 }
4990 }
4991
4992 /*
4993 * Interrupt handling
4994 * Utility routine. Poll for status of a command sent to HBA
4995 * without interrupts (a FLAG_NOINTR command).
4996 */
4997 int
4998 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4999 {
5000 int rval = TRUE;
5001
5002 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
5003
5004 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
5005 mptsas_restart_hba(mpt);
5006 }
5007
5008 /*
5009 * Wait, using drv_usecwait(), long enough for the command to
5010 * reasonably return from the target if the target isn't
5011 * "dead". A polled command may well be sent from scsi_poll, and
5012 * there are retries built in to scsi_poll if the transport
5013 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
5014 * and retries the transport up to scsi_poll_busycnt times
5015 * (currently 60) if
5016 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
5017 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
5018 *
5019 * limit the waiting to avoid a hang in the event that the
5020 * cmd never gets started but we are still receiving interrupts
5021 */
5022 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
5023 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
5024 NDBG5(("mptsas_poll: command incomplete"));
5025 rval = FALSE;
5026 break;
5027 }
5028 }
5029
5030 if (rval == FALSE) {
5031
5032 /*
5033 * this isn't supposed to happen, the hba must be wedged
5034 * Mark this cmd as a timeout.
5035 */
5036 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
5037 (STAT_TIMEOUT|STAT_ABORTED));
5038
5039 if (poll_cmd->cmd_queued == FALSE) {
5040
5041 NDBG5(("mptsas_poll: not on waitq"));
5042
5043 poll_cmd->cmd_pkt->pkt_state |=
5044 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
5045 } else {
5046
5047 /* find and remove it from the waitq */
5048 NDBG5(("mptsas_poll: delete from waitq"));
5049 mptsas_waitq_delete(mpt, poll_cmd);
5050 }
5051
5052 }
5053 mptsas_fma_check(mpt, poll_cmd);
5054 NDBG5(("mptsas_poll: done"));
5055 return (rval);
5056 }
5057
5058 /*
5059 * Used for polling cmds and TM function
5060 */
5061 static int
5062 mptsas_wait_intr(mptsas_t *mpt, int polltime)
5063 {
5064 int cnt;
5065 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5066 uint32_t int_mask;
5067
5068 NDBG5(("mptsas_wait_intr"));
5069
5070 mpt->m_polled_intr = 1;
5071
5072 /*
5073 * Get the current interrupt mask and disable interrupts. When
5074 * re-enabling ints, set mask to saved value.
5075 */
5076 int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
5077 MPTSAS_DISABLE_INTR(mpt);
5078
5079 /*
5080 * Keep polling for at least (polltime * 1000) seconds
5081 */
5082 for (cnt = 0; cnt < polltime; cnt++) {
5083 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5084 DDI_DMA_SYNC_FORCPU);
5085
5086 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5087 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5088
5089 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5090 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5091 ddi_get32(mpt->m_acc_post_queue_hdl,
5092 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5093 drv_usecwait(1000);
5094 continue;
5095 }
5096
5097 /*
5098 * The reply is valid, process it according to its
5099 * type.
5100 */
5101 mptsas_process_intr(mpt, reply_desc_union);
5102
5103 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5104 mpt->m_post_index = 0;
5105 }
5106
5107 /*
5108 * Update the global reply index
5109 */
5110 ddi_put32(mpt->m_datap,
5111 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5112 mpt->m_polled_intr = 0;
5113
5114 /*
5115 * Re-enable interrupts and quit.
5116 */
5117 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
5118 int_mask);
5119 return (TRUE);
5120
5121 }
5122
5123 /*
5124 * Clear polling flag, re-enable interrupts and quit.
5125 */
5126 mpt->m_polled_intr = 0;
5127 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
5128 return (FALSE);
5129 }
5130
/*
 * Handle a SCSI IO Success reply descriptor: validate the SMID, mark
 * the packet transported successfully, and either wake a passthru
 * waiter or queue the command for completion.
 */
static void
mptsas_handle_scsi_io_success(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2SCSIIOSuccessReplyDescriptor_t	scsi_io_success;
	uint16_t				SMID;
	mptsas_slots_t				*slots = mpt->m_active;
	mptsas_cmd_t				*cmd = NULL;
	struct scsi_pkt				*pkt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);

	/*
	 * This is a success reply so just complete the IO.  First, do a sanity
	 * check on the SMID.  The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_normal)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return;
	}

	/* Success path: record full transport state on the packet. */
	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/* Passthru commands have a dedicated waiter; wake it and return. */
	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_broadcast(&mpt->m_passthru_cv);
		return;
	} else {
		mptsas_remove_cmd(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5200
5201 static void
5202 mptsas_handle_address_reply(mptsas_t *mpt,
5203 pMpi2ReplyDescriptorsUnion_t reply_desc)
5204 {
5205 pMpi2AddressReplyDescriptor_t address_reply;
5206 pMPI2DefaultReply_t reply;
5207 mptsas_fw_diagnostic_buffer_t *pBuffer;
5208 uint32_t reply_addr, reply_frame_dma_baseaddr;
5209 uint16_t SMID, iocstatus;
5210 mptsas_slots_t *slots = mpt->m_active;
5211 mptsas_cmd_t *cmd = NULL;
5212 uint8_t function, buffer_type;
5213 m_replyh_arg_t *args;
5214 int reply_frame_no;
5215
5216 ASSERT(mutex_owned(&mpt->m_mutex));
5217
5218 address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
5219 reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
5220 &address_reply->ReplyFrameAddress);
5221 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
5222
5223 /*
5224 * If reply frame is not in the proper range we should ignore this
5225 * message and exit the interrupt handler.
5226 */
5227 reply_frame_dma_baseaddr = mpt->m_reply_frame_dma_addr & 0xffffffffu;
5228 if ((reply_addr < reply_frame_dma_baseaddr) ||
5229 (reply_addr >= (reply_frame_dma_baseaddr +
5230 (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
5231 ((reply_addr - reply_frame_dma_baseaddr) %
5232 mpt->m_reply_frame_size != 0)) {
5233 mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
5234 "address 0x%x\n", reply_addr);
5235 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5236 return;
5237 }
5238
5239 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
5240 DDI_DMA_SYNC_FORCPU);
5241 reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
5242 reply_frame_dma_baseaddr));
5243 function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
5244
5245 NDBG31(("mptsas_handle_address_reply: function 0x%x, reply_addr=0x%x",
5246 function, reply_addr));
5247
5248 /*
5249 * don't get slot information and command for events since these values
5250 * don't exist
5251 */
5252 if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
5253 (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
5254 /*
5255 * This could be a TM reply, which use the last allocated SMID,
5256 * so allow for that.
5257 */
5258 if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
5259 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
5260 "%d\n", SMID);
5261 ddi_fm_service_impact(mpt->m_dip,
5262 DDI_SERVICE_UNAFFECTED);
5263 return;
5264 }
5265
5266 cmd = slots->m_slot[SMID];
5267
5268 /*
5269 * print warning and return if the slot is empty
5270 */
5271 if (cmd == NULL) {
5272 mptsas_log(mpt, CE_WARN, "?NULL command for address "
5273 "reply in slot %d", SMID);
5274 return;
5275 }
5276 if ((cmd->cmd_flags &
5277 (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
5278 cmd->cmd_rfm = reply_addr;
5279 cmd->cmd_flags |= CFLAG_FINISHED;
5280 cv_broadcast(&mpt->m_passthru_cv);
5281 cv_broadcast(&mpt->m_config_cv);
5282 cv_broadcast(&mpt->m_fw_diag_cv);
5283 return;
5284 } else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
5285 mptsas_remove_cmd(mpt, cmd);
5286 }
5287 NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
5288 }
5289 /*
5290 * Depending on the function, we need to handle
5291 * the reply frame (and cmd) differently.
5292 */
5293 switch (function) {
5294 case MPI2_FUNCTION_SCSI_IO_REQUEST:
5295 mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
5296 break;
5297 case MPI2_FUNCTION_SCSI_TASK_MGMT:
5298 cmd->cmd_rfm = reply_addr;
5299 mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
5300 cmd);
5301 break;
5302 case MPI2_FUNCTION_FW_DOWNLOAD:
5303 cmd->cmd_flags |= CFLAG_FINISHED;
5304 cv_signal(&mpt->m_fw_cv);
5305 break;
5306 case MPI2_FUNCTION_EVENT_NOTIFICATION:
5307 reply_frame_no = (reply_addr - reply_frame_dma_baseaddr) /
5308 mpt->m_reply_frame_size;
5309 args = &mpt->m_replyh_args[reply_frame_no];
5310 args->mpt = (void *)mpt;
5311 args->rfm = reply_addr;
5312
5313 /*
5314 * Record the event if its type is enabled in
5315 * this mpt instance by ioctl.
5316 */
5317 mptsas_record_event(args);
5318
5319 /*
5320 * Handle time critical events
5321 * NOT_RESPONDING/ADDED only now
5322 */
5323 if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
5324 /*
5325 * Would not return main process,
5326 * just let taskq resolve ack action
5327 * and ack would be sent in taskq thread
5328 */
5329 NDBG20(("send mptsas_handle_event_sync success"));
5330 }
5331
5332 if (mpt->m_in_reset) {
5333 NDBG20(("dropping event received during reset"));
5334 return;
5335 }
5336
5337 if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
5338 (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
5339 mptsas_log(mpt, CE_WARN, "No memory available"
5340 "for dispatch taskq");
5341 /*
5342 * Return the reply frame to the free queue.
5343 */
5344 ddi_put32(mpt->m_acc_free_queue_hdl,
5345 &((uint32_t *)(void *)
5346 mpt->m_free_queue)[mpt->m_free_index], reply_addr);
5347 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5348 DDI_DMA_SYNC_FORDEV);
5349 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5350 mpt->m_free_index = 0;
5351 }
5352
5353 ddi_put32(mpt->m_datap,
5354 &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
5355 }
5356 return;
5357 case MPI2_FUNCTION_DIAG_BUFFER_POST:
5358 /*
5359 * If SMID is 0, this implies that the reply is due to a
5360 * release function with a status that the buffer has been
5361 * released. Set the buffer flags accordingly.
5362 */
5363 if (SMID == 0) {
5364 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
5365 &reply->IOCStatus);
5366 buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
5367 &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
5368 if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
5369 pBuffer =
5370 &mpt->m_fw_diag_buffer_list[buffer_type];
5371 pBuffer->valid_data = TRUE;
5372 pBuffer->owned_by_firmware = FALSE;
5373 pBuffer->immediate = FALSE;
5374 }
5375 } else {
5376 /*
5377 * Normal handling of diag post reply with SMID.
5378 */
5379 cmd = slots->m_slot[SMID];
5380
5381 /*
5382 * print warning and return if the slot is empty
5383 */
5384 if (cmd == NULL) {
5385 mptsas_log(mpt, CE_WARN, "?NULL command for "
5386 "address reply in slot %d", SMID);
5387 return;
5388 }
5389 cmd->cmd_rfm = reply_addr;
5390 cmd->cmd_flags |= CFLAG_FINISHED;
5391 cv_broadcast(&mpt->m_fw_diag_cv);
5392 }
5393 return;
5394 default:
5395 mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
5396 break;
5397 }
5398
5399 /*
5400 * Return the reply frame to the free queue.
5401 */
5402 ddi_put32(mpt->m_acc_free_queue_hdl,
5403 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
5404 reply_addr);
5405 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5406 DDI_DMA_SYNC_FORDEV);
5407 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5408 mpt->m_free_index = 0;
5409 }
5410 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
5411 mpt->m_free_index);
5412
5413 if (cmd->cmd_flags & CFLAG_FW_CMD)
5414 return;
5415
5416 if (cmd->cmd_flags & CFLAG_RETRY) {
5417 /*
5418 * The target returned QFULL or busy, do not add this
5419 * pkt to the doneq since the hba will retry
5420 * this cmd.
5421 *
5422 * The pkt has already been resubmitted in
5423 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
5424 * Remove this cmd_flag here.
5425 */
5426 cmd->cmd_flags &= ~CFLAG_RETRY;
5427 } else {
5428 mptsas_doneq_add(mpt, cmd);
5429 }
5430 }
5431
#ifdef MPTSAS_DEBUG
/*
 * Debug-only snapshot of the most recently captured auto-request-sense
 * data; filled in by mptsas_check_scsi_io_error() so the last sense
 * buffer can be examined with a kernel debugger.
 */
static uint8_t mptsas_last_sense[256];
#endif
5435
5436 static void
5437 mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
5438 mptsas_cmd_t *cmd)
5439 {
5440 uint8_t scsi_status, scsi_state;
5441 uint16_t ioc_status, cmd_rqs_len;
5442 uint32_t xferred, sensecount, responsedata, loginfo = 0;
5443 struct scsi_pkt *pkt;
5444 struct scsi_arq_status *arqstat;
5445 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5446 uint8_t *sensedata = NULL;
5447 uint64_t sas_wwn;
5448 uint8_t phy;
5449 char wwn_str[MPTSAS_WWN_STRLEN];
5450
5451 scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
5452 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5453 scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
5454 xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
5455 sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
5456 responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
5457 &reply->ResponseInfo);
5458
5459 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
5460 sas_wwn = ptgt->m_addr.mta_wwn;
5461 phy = ptgt->m_phynum;
5462 if (sas_wwn == 0) {
5463 (void) sprintf(wwn_str, "p%x", phy);
5464 } else {
5465 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
5466 }
5467 loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
5468 &reply->IOCLogInfo);
5469 mptsas_log(mpt, CE_NOTE,
5470 "?Log info 0x%x received for target %d %s.\n"
5471 "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5472 loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
5473 scsi_state);
5474 }
5475
5476 NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5477 scsi_status, ioc_status, scsi_state));
5478
5479 pkt = CMD2PKT(cmd);
5480 *(pkt->pkt_scbp) = scsi_status;
5481
5482 if (loginfo == 0x31170000) {
5483 /*
5484 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5485 * 0x31170000 comes, that means the device missing delay
5486 * is in progressing, the command need retry later.
5487 */
5488 *(pkt->pkt_scbp) = STATUS_BUSY;
5489 return;
5490 }
5491
5492 if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
5493 ((ioc_status & MPI2_IOCSTATUS_MASK) ==
5494 MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
5495 pkt->pkt_reason = CMD_INCOMPLETE;
5496 pkt->pkt_state |= STATE_GOT_BUS;
5497 if (ptgt->m_reset_delay == 0) {
5498 mptsas_set_throttle(mpt, ptgt,
5499 DRAIN_THROTTLE);
5500 }
5501 return;
5502 }
5503
5504 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5505 responsedata &= 0x000000FF;
5506 if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5507 mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
5508 pkt->pkt_reason = CMD_TLR_OFF;
5509 return;
5510 }
5511 }
5512
5513
5514 switch (scsi_status) {
5515 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5516 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5517 arqstat = (void*)(pkt->pkt_scbp);
5518 arqstat->sts_rqpkt_status = *((struct scsi_status *)
5519 (pkt->pkt_scbp));
5520 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
5521 STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
5522 if (cmd->cmd_flags & CFLAG_XARQ) {
5523 pkt->pkt_state |= STATE_XARQ_DONE;
5524 }
5525 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5526 pkt->pkt_state |= STATE_XFERRED_DATA;
5527 }
5528 arqstat->sts_rqpkt_reason = pkt->pkt_reason;
5529 arqstat->sts_rqpkt_state = pkt->pkt_state;
5530 arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
5531 arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
5532 sensedata = (uint8_t *)&arqstat->sts_sensedata;
5533 cmd_rqs_len = cmd->cmd_extrqslen ?
5534 cmd->cmd_extrqslen : cmd->cmd_rqslen;
5535 (void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
5536 DDI_DMA_SYNC_FORKERNEL);
5537 #ifdef MPTSAS_DEBUG
5538 bcopy(cmd->cmd_arq_buf, mptsas_last_sense,
5539 ((cmd_rqs_len >= sizeof (mptsas_last_sense)) ?
5540 sizeof (mptsas_last_sense):cmd_rqs_len));
5541 #endif
5542 bcopy((uchar_t *)cmd->cmd_arq_buf, sensedata,
5543 ((cmd_rqs_len >= sensecount) ? sensecount :
5544 cmd_rqs_len));
5545 arqstat->sts_rqpkt_resid = (cmd_rqs_len - sensecount);
5546 cmd->cmd_flags |= CFLAG_CMDARQ;
5547 /*
5548 * Set proper status for pkt if autosense was valid
5549 */
5550 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5551 struct scsi_status zero_status = { 0 };
5552 arqstat->sts_rqpkt_status = zero_status;
5553 }
5554
5555 /*
5556 * ASC=0x47 is parity error
5557 * ASC=0x48 is initiator detected error received
5558 */
5559 if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
5560 ((scsi_sense_asc(sensedata) == 0x47) ||
5561 (scsi_sense_asc(sensedata) == 0x48))) {
5562 mptsas_log(mpt, CE_NOTE, "Aborted_command!");
5563 }
5564
5565 /*
5566 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5567 * ASC/ASCQ=0x25/0x00 means invalid lun
5568 */
5569 if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
5570 (scsi_sense_asc(sensedata) == 0x3F) &&
5571 (scsi_sense_ascq(sensedata) == 0x0E)) ||
5572 ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
5573 (scsi_sense_asc(sensedata) == 0x25) &&
5574 (scsi_sense_ascq(sensedata) == 0x00))) {
5575 mptsas_topo_change_list_t *topo_node = NULL;
5576
5577 topo_node = kmem_zalloc(
5578 sizeof (mptsas_topo_change_list_t),
5579 KM_NOSLEEP);
5580 if (topo_node == NULL) {
5581 mptsas_log(mpt, CE_NOTE, "No memory"
5582 "resource for handle SAS dynamic"
5583 "reconfigure.\n");
5584 break;
5585 }
5586 topo_node->mpt = mpt;
5587 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
5588 topo_node->un.phymask = ptgt->m_addr.mta_phymask;
5589 topo_node->devhdl = ptgt->m_devhdl;
5590 topo_node->object = (void *)ptgt;
5591 topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5592
5593 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5594 mptsas_handle_dr,
5595 (void *)topo_node,
5596 DDI_NOSLEEP)) != DDI_SUCCESS) {
5597 kmem_free(topo_node,
5598 sizeof (mptsas_topo_change_list_t));
5599 mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
5600 "for handle SAS dynamic reconfigure"
5601 "failed. \n");
5602 }
5603 }
5604 break;
5605 case MPI2_SCSI_STATUS_GOOD:
5606 switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5607 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5608 pkt->pkt_reason = CMD_DEV_GONE;
5609 pkt->pkt_state |= STATE_GOT_BUS;
5610 if (ptgt->m_reset_delay == 0) {
5611 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5612 }
5613 NDBG31(("lost disk for target%d, command:%x",
5614 Tgt(cmd), pkt->pkt_cdbp[0]));
5615 break;
5616 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5617 NDBG31(("data overrun: xferred=%d", xferred));
5618 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5619 pkt->pkt_reason = CMD_DATA_OVR;
5620 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5621 | STATE_SENT_CMD | STATE_GOT_STATUS
5622 | STATE_XFERRED_DATA);
5623 pkt->pkt_resid = 0;
5624 break;
5625 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5626 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5627 NDBG31(("data underrun: xferred=%d", xferred));
5628 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5629 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5630 | STATE_SENT_CMD | STATE_GOT_STATUS);
5631 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5632 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5633 pkt->pkt_state |= STATE_XFERRED_DATA;
5634 }
5635 break;
5636 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5637 if (cmd->cmd_active_expiration <= gethrtime()) {
5638 /*
5639 * When timeout requested, propagate
5640 * proper reason and statistics to
5641 * target drivers.
5642 */
5643 mptsas_set_pkt_reason(mpt, cmd, CMD_TIMEOUT,
5644 STAT_BUS_RESET | STAT_TIMEOUT);
5645 } else {
5646 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
5647 STAT_BUS_RESET);
5648 }
5649 break;
5650 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5651 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5652 mptsas_set_pkt_reason(mpt,
5653 cmd, CMD_RESET, STAT_DEV_RESET);
5654 break;
5655 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5656 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5657 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5658 mptsas_set_pkt_reason(mpt,
5659 cmd, CMD_TERMINATED, STAT_TERMINATED);
5660 break;
5661 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5662 case MPI2_IOCSTATUS_BUSY:
5663 /*
5664 * set throttles to drain
5665 */
5666 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
5667 ptgt = refhash_next(mpt->m_targets, ptgt)) {
5668 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5669 }
5670
5671 /*
5672 * retry command
5673 */
5674 cmd->cmd_flags |= CFLAG_RETRY;
5675 cmd->cmd_pkt_flags |= FLAG_HEAD;
5676
5677 (void) mptsas_accept_pkt(mpt, cmd);
5678 break;
5679 default:
5680 mptsas_log(mpt, CE_WARN,
5681 "unknown ioc_status = %x\n", ioc_status);
5682 mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5683 "count = %x, scsi_status = %x", scsi_state,
5684 xferred, scsi_status);
5685 break;
5686 }
5687 break;
5688 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5689 mptsas_handle_qfull(mpt, cmd);
5690 break;
5691 case MPI2_SCSI_STATUS_BUSY:
5692 NDBG31(("scsi_status busy received"));
5693 break;
5694 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5695 NDBG31(("scsi_status reservation conflict received"));
5696 break;
5697 default:
5698 mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
5699 scsi_status, ioc_status);
5700 mptsas_log(mpt, CE_WARN,
5701 "mptsas_process_intr: invalid scsi status\n");
5702 break;
5703 }
5704 }
5705
5706 static void
5707 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5708 mptsas_cmd_t *cmd)
5709 {
5710 uint8_t task_type;
5711 uint16_t ioc_status;
5712 uint32_t log_info;
5713 uint16_t dev_handle;
5714 struct scsi_pkt *pkt = CMD2PKT(cmd);
5715
5716 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5717 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5718 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5719 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5720
5721 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5722 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5723 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5724 task_type, ioc_status, log_info, dev_handle);
5725 pkt->pkt_reason = CMD_INCOMPLETE;
5726 return;
5727 }
5728
5729 switch (task_type) {
5730 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5731 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5732 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5733 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5734 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5735 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5736 break;
5737 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5738 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5739 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5740 /*
5741 * Check for invalid DevHandle of 0 in case application
5742 * sends bad command. DevHandle of 0 could cause problems.
5743 */
5744 if (dev_handle == 0) {
5745 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5746 " DevHandle of 0.");
5747 } else {
5748 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5749 task_type);
5750 }
5751 break;
5752 default:
5753 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5754 task_type);
5755 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5756 break;
5757 }
5758 }
5759
/*
 * Body of a done-queue helper thread.  Each helper drains its own
 * per-thread done list and invokes packet completion outside of the
 * list mutex.  The thread runs until MPTSAS_DONEQ_THREAD_ACTIVE is
 * cleared, then decrements m_doneq_thread_n and wakes any waiter on
 * m_doneq_thread_cv (used during teardown).
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t			*mpt = arg->mpt;
	uint64_t			t = arg->t;
	mptsas_cmd_t			*cmd;
	struct scsi_pkt			*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		/* Sleep until work is queued (or we are told to exit). */
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Drop the list mutex while running the completion
		 * callback; it may take arbitrary time.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Announce this helper's exit to the teardown path. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5791
5792
5793 /*
5794 * mpt interrupt handler.
5795 */
5796 static uint_t
5797 mptsas_intr(caddr_t arg1, caddr_t arg2)
5798 {
5799 mptsas_t *mpt = (void *)arg1;
5800 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5801 uchar_t did_reply = FALSE;
5802
5803 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5804
5805 mutex_enter(&mpt->m_mutex);
5806
5807 /*
5808 * If interrupts are shared by two channels then check whether this
5809 * interrupt is genuinely for this channel by making sure first the
5810 * chip is in high power state.
5811 */
5812 if ((mpt->m_options & MPTSAS_OPT_PM) &&
5813 (mpt->m_power_level != PM_LEVEL_D0)) {
5814 mutex_exit(&mpt->m_mutex);
5815 return (DDI_INTR_UNCLAIMED);
5816 }
5817
5818 /*
5819 * If polling, interrupt was triggered by some shared interrupt because
5820 * IOC interrupts are disabled during polling, so polling routine will
5821 * handle any replies. Considering this, if polling is happening,
5822 * return with interrupt unclaimed.
5823 */
5824 if (mpt->m_polled_intr) {
5825 mutex_exit(&mpt->m_mutex);
5826 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5827 return (DDI_INTR_UNCLAIMED);
5828 }
5829
5830 /*
5831 * Read the istat register.
5832 */
5833 if ((INTPENDING(mpt)) != 0) {
5834 /*
5835 * read fifo until empty.
5836 */
5837 #ifndef __lock_lint
5838 _NOTE(CONSTCOND)
5839 #endif
5840 while (TRUE) {
5841 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5842 DDI_DMA_SYNC_FORCPU);
5843 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5844 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5845
5846 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5847 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5848 ddi_get32(mpt->m_acc_post_queue_hdl,
5849 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5850 break;
5851 }
5852
5853 /*
5854 * The reply is valid, process it according to its
5855 * type. Also, set a flag for updating the reply index
5856 * after they've all been processed.
5857 */
5858 did_reply = TRUE;
5859
5860 mptsas_process_intr(mpt, reply_desc_union);
5861
5862 /*
5863 * Increment post index and roll over if needed.
5864 */
5865 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5866 mpt->m_post_index = 0;
5867 }
5868 }
5869
5870 /*
5871 * Update the global reply index if at least one reply was
5872 * processed.
5873 */
5874 if (did_reply) {
5875 ddi_put32(mpt->m_datap,
5876 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5877 }
5878 } else {
5879 mutex_exit(&mpt->m_mutex);
5880 return (DDI_INTR_UNCLAIMED);
5881 }
5882 NDBG1(("mptsas_intr complete"));
5883
5884 /*
5885 * If no helper threads are created, process the doneq in ISR. If
5886 * helpers are created, use the doneq length as a metric to measure the
5887 * load on the interrupt CPU. If it is long enough, which indicates the
5888 * load is heavy, then we deliver the IO completions to the helpers.
5889 * This measurement has some limitations, although it is simple and
5890 * straightforward and works well for most of the cases at present.
5891 */
5892 if (!mpt->m_doneq_thread_n ||
5893 (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5894 mptsas_doneq_empty(mpt);
5895 } else {
5896 mptsas_deliver_doneq_thread(mpt);
5897 }
5898
5899 /*
5900 * If there are queued cmd, start them now.
5901 */
5902 if (mpt->m_waitq != NULL) {
5903 mptsas_restart_waitq(mpt);
5904 }
5905
5906 mutex_exit(&mpt->m_mutex);
5907 return (DDI_INTR_CLAIMED);
5908 }
5909
/*
 * Dispatch one reply descriptor to the proper handler based on its
 * descriptor flags (fast-path SCSI IO success, address reply, or
 * unknown), then reset the descriptor slot to the all-ones "empty"
 * pattern and sync it back to the device for re-use.  Caller holds
 * m_mutex and advances m_post_index afterwards.
 */
static void
mptsas_process_intr(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc_union)
{
	uint8_t	reply_type;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * The reply is valid, process it according to its
	 * type.  Also, set a flag for updating the reply index
	 * after they've all been processed.
	 */
	reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
	    &reply_desc_union->Default.ReplyFlags);
	reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
	    reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
		mptsas_handle_scsi_io_success(mpt, reply_desc_union);
	} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
		mptsas_handle_address_reply(mpt, reply_desc_union);
	} else {
		mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}

	/*
	 * Clear the reply descriptor for re-use and increment
	 * index.
	 */
	ddi_put64(mpt->m_acc_post_queue_hdl,
	    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
	    0xFFFFFFFFFFFFFFFF);
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
}
5946
5947 /*
5948 * handle qfull condition
5949 */
5950 static void
5951 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5952 {
5953 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5954
5955 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5956 (ptgt->m_qfull_retries == 0)) {
5957 /*
5958 * We have exhausted the retries on QFULL, or,
5959 * the target driver has indicated that it
5960 * wants to handle QFULL itself by setting
5961 * qfull-retries capability to 0. In either case
5962 * we want the target driver's QFULL handling
5963 * to kick in. We do this by having pkt_reason
5964 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5965 */
5966 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5967 } else {
5968 if (ptgt->m_reset_delay == 0) {
5969 ptgt->m_t_throttle =
5970 max((ptgt->m_t_ncmds - 2), 0);
5971 }
5972
5973 cmd->cmd_pkt_flags |= FLAG_HEAD;
5974 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5975 cmd->cmd_flags |= CFLAG_RETRY;
5976
5977 (void) mptsas_accept_pkt(mpt, cmd);
5978
5979 /*
5980 * when target gives queue full status with no commands
5981 * outstanding (m_t_ncmds == 0), throttle is set to 0
5982 * (HOLD_THROTTLE), and the queue full handling start
5983 * (see psarc/1994/313); if there are commands outstanding,
5984 * throttle is set to (m_t_ncmds - 2)
5985 */
5986 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5987 /*
5988 * By setting throttle to QFULL_THROTTLE, we
5989 * avoid submitting new commands and in
5990 * mptsas_restart_cmd find out slots which need
5991 * their throttles to be cleared.
5992 */
5993 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5994 if (mpt->m_restart_cmd_timeid == 0) {
5995 mpt->m_restart_cmd_timeid =
5996 timeout(mptsas_restart_cmd, mpt,
5997 ptgt->m_qfull_retry_interval);
5998 }
5999 }
6000 }
6001 }
6002
6003 mptsas_phymask_t
6004 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
6005 {
6006 mptsas_phymask_t phy_mask = 0;
6007 uint8_t i = 0;
6008
6009 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
6010
6011 ASSERT(mutex_owned(&mpt->m_mutex));
6012
6013 /*
6014 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
6015 */
6016 if (physport == 0xFF) {
6017 return (0);
6018 }
6019
6020 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
6021 if (mpt->m_phy_info[i].attached_devhdl &&
6022 (mpt->m_phy_info[i].phy_mask != 0) &&
6023 (mpt->m_phy_info[i].port_num == physport)) {
6024 phy_mask = mpt->m_phy_info[i].phy_mask;
6025 break;
6026 }
6027 }
6028 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
6029 mpt->m_instance, physport, phy_mask));
6030 return (phy_mask);
6031 }
6032
6033 /*
6034 * mpt free device handle after device gone, by use of passthrough
6035 */
6036 static int
6037 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
6038 {
6039 Mpi2SasIoUnitControlRequest_t req;
6040 Mpi2SasIoUnitControlReply_t rep;
6041 int ret;
6042
6043 ASSERT(mutex_owned(&mpt->m_mutex));
6044
6045 /*
6046 * Need to compose a SAS IO Unit Control request message
6047 * and call mptsas_do_passthru() function
6048 */
6049 bzero(&req, sizeof (req));
6050 bzero(&rep, sizeof (rep));
6051
6052 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
6053 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
6054 req.DevHandle = LE_16(devhdl);
6055
6056 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
6057 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
6058 if (ret != 0) {
6059 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6060 "Control error %d", ret);
6061 return (DDI_FAILURE);
6062 }
6063
6064 /* do passthrough success, check the ioc status */
6065 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
6066 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6067 "Control IOCStatus %d", LE_16(rep.IOCStatus));
6068 return (DDI_FAILURE);
6069 }
6070
6071 return (DDI_SUCCESS);
6072 }
6073
6074 static void
6075 mptsas_update_phymask(mptsas_t *mpt)
6076 {
6077 mptsas_phymask_t mask = 0, phy_mask;
6078 char *phy_mask_name;
6079 uint8_t current_port;
6080 int i, j;
6081
6082 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
6083
6084 ASSERT(mutex_owned(&mpt->m_mutex));
6085
6086 (void) mptsas_get_sas_io_unit_page(mpt);
6087
6088 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6089
6090 for (i = 0; i < mpt->m_num_phys; i++) {
6091 phy_mask = 0x00;
6092
6093 if (mpt->m_phy_info[i].attached_devhdl == 0)
6094 continue;
6095
6096 bzero(phy_mask_name, sizeof (phy_mask_name));
6097
6098 current_port = mpt->m_phy_info[i].port_num;
6099
6100 if ((mask & (1 << i)) != 0)
6101 continue;
6102
6103 for (j = 0; j < mpt->m_num_phys; j++) {
6104 if (mpt->m_phy_info[j].attached_devhdl &&
6105 (mpt->m_phy_info[j].port_num == current_port)) {
6106 phy_mask |= (1 << j);
6107 }
6108 }
6109 mask = mask | phy_mask;
6110
6111 for (j = 0; j < mpt->m_num_phys; j++) {
6112 if ((phy_mask >> j) & 0x01) {
6113 mpt->m_phy_info[j].phy_mask = phy_mask;
6114 }
6115 }
6116
6117 (void) sprintf(phy_mask_name, "%x", phy_mask);
6118
6119 mutex_exit(&mpt->m_mutex);
6120 /*
6121 * register a iport, if the port has already been existed
6122 * SCSA will do nothing and just return.
6123 */
6124 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
6125 mutex_enter(&mpt->m_mutex);
6126 }
6127 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6128 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
6129 }
6130
6131 /*
6132 * mptsas_handle_dr is a task handler for DR, the DR action includes:
6133 * 1. Directly attched Device Added/Removed.
6134 * 2. Expander Device Added/Removed.
6135 * 3. Indirectly Attached Device Added/Expander.
6136 * 4. LUNs of a existing device status change.
6137 * 5. RAID volume created/deleted.
6138 * 6. Member of RAID volume is released because of RAID deletion.
6139 * 7. Physical disks are removed because of RAID creation.
6140 */
/*
 * Taskq handler: walk a list of topology-change nodes, resolve the
 * iport (parent dip) each node belongs to, and apply the change via
 * mptsas_handle_topo_change().  Each node is freed as it is consumed.
 * Port information is refreshed first when PHYs have come online.
 */
static void
mptsas_handle_dr(void *args) {
	mptsas_topo_change_list_t	*topo_node = NULL;
	mptsas_topo_change_list_t	*save_node = NULL;
	mptsas_t			*mpt;
	dev_info_t			*parent = NULL;
	mptsas_phymask_t		phymask = 0;
	char				*phy_mask_name;
	uint8_t				flags = 0, physport = 0xff;
	uint8_t				port_update = 0;
	uint_t				event;

	topo_node = (mptsas_topo_change_list_t *)args;

	mpt = topo_node->mpt;
	event = topo_node->event;
	flags = topo_node->flags;

	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);

	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));

	/*
	 * Decide whether this event class can change the initiator port
	 * configuration and therefore requires a phymask refresh.
	 */
	switch (event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Direct attached or expander attached device added
			 * into system or a Phys Disk that is being unhidden.
			 */
			port_update = 1;
		}
		break;
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
		/*
		 * New expander added into system, it must be the head
		 * of topo_change_list_t
		 */
		port_update = 1;
		break;
	default:
		port_update = 0;
		break;
	}
	/*
	 * All cases port_update == 1 may cause initiator port form change
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_port_chng && port_update) {
		/*
		 * mpt->m_port_chng flag indicates some PHYs of initiator
		 * port have changed to online.  So when expander added or
		 * directly attached device online event come, we force to
		 * update port information by issuing SAS IO Unit Page and
		 * update PHYMASKs.
		 */
		(void) mptsas_update_phymask(mpt);
		mpt->m_port_chng = 0;

	}
	mutex_exit(&mpt->m_mutex);
	/* Process each node in the change list, freeing as we go. */
	while (topo_node) {
		phymask = 0;
		if (parent == NULL) {
			physport = topo_node->un.physport;
			event = topo_node->event;
			flags = topo_node->flags;
			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
				/*
				 * For all offline events, phymask is known
				 */
				phymask = topo_node->un.phymask;
				goto find_parent;
			}
			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
				goto handle_topo_change;
			}
			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
				phymask = topo_node->un.phymask;
				goto find_parent;
			}

			if ((flags ==
			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
				/*
				 * There is no any field in IR_CONFIG_CHANGE
				 * event indicate physport/phynum, let's get
				 * parent after SAS Device Page0 request.
				 */
				goto handle_topo_change;
			}

			mutex_enter(&mpt->m_mutex);
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				/*
				 * If the direct attached device added or a
				 * phys disk is being unhidden, argument
				 * physport actually is PHY#, so we have to get
				 * phymask according PHY#.
				 */
				physport = mpt->m_phy_info[physport].port_num;
			}

			/*
			 * Translate physport to phymask so that we can search
			 * parent dip.
			 */
			phymask = mptsas_physport_to_phymask(mpt,
			    physport);
			mutex_exit(&mpt->m_mutex);

find_parent:
			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
			/*
			 * For RAID topology change node, write the iport name
			 * as v0.
			 */
			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
				(void) sprintf(phy_mask_name, "v0");
			} else {
				/*
				 * phymask can be 0 if the drive has been
				 * pulled by the time an add event is
				 * processed.  If phymask is 0, just skip this
				 * event and continue.
				 */
				if (phymask == 0) {
					mutex_enter(&mpt->m_mutex);
					save_node = topo_node;
					topo_node = topo_node->next;
					ASSERT(save_node);
					kmem_free(save_node,
					    sizeof (mptsas_topo_change_list_t));
					mutex_exit(&mpt->m_mutex);

					parent = NULL;
					continue;
				}
				(void) sprintf(phy_mask_name, "%x", phymask);
			}
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find an "
				    "iport, should not happen!");
				goto out;
			}

		}
		ASSERT(parent);
handle_topo_change:

		mutex_enter(&mpt->m_mutex);
		/*
		 * If HBA is being reset, don't perform operations depending
		 * on the IOC.  We must free the topo list, however.
		 */
		if (!mpt->m_in_reset)
			mptsas_handle_topo_change(topo_node, parent);
		else
			NDBG20(("skipping topo change received during reset"));
		save_node = topo_node;
		topo_node = topo_node->next;
		ASSERT(save_node);
		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
		mutex_exit(&mpt->m_mutex);

		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
			/*
			 * If direct attached device associated, make sure
			 * reset the parent before start the next one.  But
			 * all devices associated with expander shares the
			 * parent.  Also, reset parent if this is for RAID.
			 */
			parent = NULL;
		}
	}
out:
	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
}
6326
6327 static void
6328 mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
6329 dev_info_t *parent)
6330 {
6331 mptsas_target_t *ptgt = NULL;
6332 mptsas_smp_t *psmp = NULL;
6333 mptsas_t *mpt = (void *)topo_node->mpt;
6334 uint16_t devhdl;
6335 uint16_t attached_devhdl;
6336 uint64_t sas_wwn = 0;
6337 int rval = 0;
6338 uint32_t page_address;
6339 uint8_t phy, flags;
6340 char *addr = NULL;
6341 dev_info_t *lundip;
6342 int circ = 0, circ1 = 0;
6343 char attached_wwnstr[MPTSAS_WWN_STRLEN];
6344
6345 NDBG20(("mptsas%d handle_topo_change enter, devhdl 0x%x,"
6346 "event 0x%x, flags 0x%x", mpt->m_instance, topo_node->devhdl,
6347 topo_node->event, topo_node->flags));
6348
6349 ASSERT(mutex_owned(&mpt->m_mutex));
6350
6351 switch (topo_node->event) {
6352 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6353 {
6354 char *phy_mask_name;
6355 mptsas_phymask_t phymask = 0;
6356
6357 if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6358 /*
6359 * Get latest RAID info.
6360 */
6361 (void) mptsas_get_raid_info(mpt);
6362 ptgt = refhash_linear_search(mpt->m_targets,
6363 mptsas_target_eval_devhdl, &topo_node->devhdl);
6364 if (ptgt == NULL)
6365 break;
6366 } else {
6367 ptgt = (void *)topo_node->object;
6368 }
6369
6370 if (ptgt == NULL) {
6371 /*
6372 * If a Phys Disk was deleted, RAID info needs to be
6373 * updated to reflect the new topology.
6374 */
6375 (void) mptsas_get_raid_info(mpt);
6376
6377 /*
6378 * Get sas device page 0 by DevHandle to make sure if
6379 * SSP/SATA end device exist.
6380 */
6381 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6382 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
6383 topo_node->devhdl;
6384
6385 rval = mptsas_get_target_device_info(mpt, page_address,
6386 &devhdl, &ptgt);
6387 if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
6388 mptsas_log(mpt, CE_NOTE,
6389 "mptsas_handle_topo_change: target %d is "
6390 "not a SAS/SATA device. \n",
6391 topo_node->devhdl);
6392 } else if (rval == DEV_INFO_FAIL_ALLOC) {
6393 mptsas_log(mpt, CE_NOTE,
6394 "mptsas_handle_topo_change: could not "
6395 "allocate memory. \n");
6396 }
6397 /*
6398 * If rval is DEV_INFO_PHYS_DISK than there is nothing
6399 * else to do, just leave.
6400 */
6401 if (rval != DEV_INFO_SUCCESS) {
6402 return;
6403 }
6404 }
6405
6406 ASSERT(ptgt->m_devhdl == topo_node->devhdl);
6407
6408 mutex_exit(&mpt->m_mutex);
6409 flags = topo_node->flags;
6410
6411 if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
6412 phymask = ptgt->m_addr.mta_phymask;
6413 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6414 (void) sprintf(phy_mask_name, "%x", phymask);
6415 parent = scsi_hba_iport_find(mpt->m_dip,
6416 phy_mask_name);
6417 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6418 if (parent == NULL) {
6419 mptsas_log(mpt, CE_WARN, "Failed to find a "
6420 "iport for PD, should not happen!");
6421 mutex_enter(&mpt->m_mutex);
6422 break;
6423 }
6424 }
6425
6426 if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6427 ndi_devi_enter(parent, &circ1);
6428 (void) mptsas_config_raid(parent, topo_node->devhdl,
6429 &lundip);
6430 ndi_devi_exit(parent, circ1);
6431 } else {
6432 /*
6433 * hold nexus for bus configure
6434 */
6435 ndi_devi_enter(scsi_vhci_dip, &circ);
6436 ndi_devi_enter(parent, &circ1);
6437 rval = mptsas_config_target(parent, ptgt);
6438 /*
6439 * release nexus for bus configure
6440 */
6441 ndi_devi_exit(parent, circ1);
6442 ndi_devi_exit(scsi_vhci_dip, circ);
6443
6444 /*
6445 * Add parent's props for SMHBA support
6446 */
6447 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6448 bzero(attached_wwnstr,
6449 sizeof (attached_wwnstr));
6450 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
6451 ptgt->m_addr.mta_wwn);
6452 if (ddi_prop_update_string(DDI_DEV_T_NONE,
6453 parent,
6454 SCSI_ADDR_PROP_ATTACHED_PORT,
6455 attached_wwnstr)
6456 != DDI_PROP_SUCCESS) {
6457 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6458 parent,
6459 SCSI_ADDR_PROP_ATTACHED_PORT);
6460 mptsas_log(mpt, CE_WARN, "Failed to"
6461 "attached-port props");
6462 return;
6463 }
6464 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6465 MPTSAS_NUM_PHYS, 1) !=
6466 DDI_PROP_SUCCESS) {
6467 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6468 parent, MPTSAS_NUM_PHYS);
6469 mptsas_log(mpt, CE_WARN, "Failed to"
6470 " create num-phys props");
6471 return;
6472 }
6473
6474 /*
6475 * Update PHY info for smhba
6476 */
6477 mutex_enter(&mpt->m_mutex);
6478 if (mptsas_smhba_phy_init(mpt)) {
6479 mutex_exit(&mpt->m_mutex);
6480 mptsas_log(mpt, CE_WARN, "mptsas phy"
6481 " update failed");
6482 return;
6483 }
6484 mutex_exit(&mpt->m_mutex);
6485
6486 /*
6487 * topo_node->un.physport is really the PHY#
6488 * for direct attached devices
6489 */
6490 mptsas_smhba_set_one_phy_props(mpt, parent,
6491 topo_node->un.physport, &attached_devhdl);
6492
6493 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6494 MPTSAS_VIRTUAL_PORT, 0) !=
6495 DDI_PROP_SUCCESS) {
6496 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6497 parent, MPTSAS_VIRTUAL_PORT);
6498 mptsas_log(mpt, CE_WARN,
6499 "mptsas virtual-port"
6500 "port prop update failed");
6501 return;
6502 }
6503 }
6504 }
6505 mutex_enter(&mpt->m_mutex);
6506
6507 NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
6508 "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
6509 ptgt->m_addr.mta_phymask));
6510 break;
6511 }
6512 case MPTSAS_DR_EVENT_OFFLINE_TARGET:
6513 {
6514 devhdl = topo_node->devhdl;
6515 ptgt = refhash_linear_search(mpt->m_targets,
6516 mptsas_target_eval_devhdl, &devhdl);
6517 if (ptgt == NULL)
6518 break;
6519
6520 sas_wwn = ptgt->m_addr.mta_wwn;
6521 phy = ptgt->m_phynum;
6522
6523 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
6524
6525 if (sas_wwn) {
6526 (void) sprintf(addr, "w%016"PRIx64, sas_wwn);
6527 } else {
6528 (void) sprintf(addr, "p%x", phy);
6529 }
6530 ASSERT(ptgt->m_devhdl == devhdl);
6531
6532 if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
6533 (topo_node->flags ==
6534 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6535 /*
6536 * Get latest RAID info if RAID volume status changes
6537 * or Phys Disk status changes
6538 */
6539 (void) mptsas_get_raid_info(mpt);
6540 }
6541 /*
6542 * Abort all outstanding command on the device
6543 */
6544 rval = mptsas_do_scsi_reset(mpt, devhdl);
6545 if (rval) {
6546 NDBG20(("mptsas%d handle_topo_change to reset target "
6547 "before offline devhdl:%x, phymask:%x, rval:%x",
6548 mpt->m_instance, ptgt->m_devhdl,
6549 ptgt->m_addr.mta_phymask, rval));
6550 }
6551
6552 mutex_exit(&mpt->m_mutex);
6553
6554 ndi_devi_enter(scsi_vhci_dip, &circ);
6555 ndi_devi_enter(parent, &circ1);
6556 rval = mptsas_offline_target(parent, addr);
6557 ndi_devi_exit(parent, circ1);
6558 ndi_devi_exit(scsi_vhci_dip, circ);
6559 NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
6560 "phymask:%x, rval:%x", mpt->m_instance,
6561 ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));
6562
6563 kmem_free(addr, SCSI_MAXNAMELEN);
6564
6565 /*
6566 * Clear parent's props for SMHBA support
6567 */
6568 flags = topo_node->flags;
6569 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6570 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6571 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6572 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6573 DDI_PROP_SUCCESS) {
6574 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6575 SCSI_ADDR_PROP_ATTACHED_PORT);
6576 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6577 "prop update failed");
6578 break;
6579 }
6580 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6581 MPTSAS_NUM_PHYS, 0) !=
6582 DDI_PROP_SUCCESS) {
6583 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6584 MPTSAS_NUM_PHYS);
6585 mptsas_log(mpt, CE_WARN, "mptsas num phys "
6586 "prop update failed");
6587 break;
6588 }
6589 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6590 MPTSAS_VIRTUAL_PORT, 1) !=
6591 DDI_PROP_SUCCESS) {
6592 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6593 MPTSAS_VIRTUAL_PORT);
6594 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6595 "prop update failed");
6596 break;
6597 }
6598 }
6599
6600 mutex_enter(&mpt->m_mutex);
6601 ptgt->m_led_status = 0;
6602 (void) mptsas_flush_led_status(mpt, ptgt);
6603 if (rval == DDI_SUCCESS) {
6604 refhash_remove(mpt->m_targets, ptgt);
6605 ptgt = NULL;
6606 } else {
6607 /*
6608 * clean DR_INTRANSITION flag to allow I/O down to
6609 * PHCI driver since failover finished.
6610 * Invalidate the devhdl
6611 */
6612 ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6613 ptgt->m_tgt_unconfigured = 0;
6614 mutex_enter(&mpt->m_tx_waitq_mutex);
6615 ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6616 mutex_exit(&mpt->m_tx_waitq_mutex);
6617 }
6618
6619 /*
6620 * Send SAS IO Unit Control to free the dev handle
6621 */
6622 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6623 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
6624 rval = mptsas_free_devhdl(mpt, devhdl);
6625
6626 NDBG20(("mptsas%d handle_topo_change to remove "
6627 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6628 rval));
6629 }
6630
6631 break;
6632 }
6633 case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
6634 {
6635 devhdl = topo_node->devhdl;
6636 /*
6637 * If this is the remove handle event, do a reset first.
6638 */
6639 if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6640 rval = mptsas_do_scsi_reset(mpt, devhdl);
6641 if (rval) {
6642 NDBG20(("mpt%d reset target before remove "
6643 "devhdl:%x, rval:%x", mpt->m_instance,
6644 devhdl, rval));
6645 }
6646 }
6647
6648 /*
6649 * Send SAS IO Unit Control to free the dev handle
6650 */
6651 rval = mptsas_free_devhdl(mpt, devhdl);
6652 NDBG20(("mptsas%d handle_topo_change to remove "
6653 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6654 rval));
6655 break;
6656 }
6657 case MPTSAS_DR_EVENT_RECONFIG_SMP:
6658 {
6659 mptsas_smp_t smp;
6660 dev_info_t *smpdip;
6661
6662 devhdl = topo_node->devhdl;
6663
6664 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
6665 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
6666 rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
6667 if (rval != DDI_SUCCESS) {
6668 mptsas_log(mpt, CE_WARN, "failed to online smp, "
6669 "handle %x", devhdl);
6670 return;
6671 }
6672
6673 psmp = mptsas_smp_alloc(mpt, &smp);
6674 if (psmp == NULL) {
6675 return;
6676 }
6677
6678 mutex_exit(&mpt->m_mutex);
6679 ndi_devi_enter(parent, &circ1);
6680 (void) mptsas_online_smp(parent, psmp, &smpdip);
6681 ndi_devi_exit(parent, circ1);
6682
6683 mutex_enter(&mpt->m_mutex);
6684 break;
6685 }
6686 case MPTSAS_DR_EVENT_OFFLINE_SMP:
6687 {
6688 devhdl = topo_node->devhdl;
6689 uint32_t dev_info;
6690
6691 psmp = refhash_linear_search(mpt->m_smp_targets,
6692 mptsas_smp_eval_devhdl, &devhdl);
6693 if (psmp == NULL)
6694 break;
6695 /*
6696 * The mptsas_smp_t data is released only if the dip is offlined
6697 * successfully.
6698 */
6699 mutex_exit(&mpt->m_mutex);
6700
6701 ndi_devi_enter(parent, &circ1);
6702 rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
6703 ndi_devi_exit(parent, circ1);
6704
6705 dev_info = psmp->m_deviceinfo;
6706 if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
6707 DEVINFO_DIRECT_ATTACHED) {
6708 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6709 MPTSAS_VIRTUAL_PORT, 1) !=
6710 DDI_PROP_SUCCESS) {
6711 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6712 MPTSAS_VIRTUAL_PORT);
6713 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6714 "prop update failed");
6715 return;
6716 }
6717 /*
6718 * Check whether the smp connected to the iport,
6719 */
6720 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6721 MPTSAS_NUM_PHYS, 0) !=
6722 DDI_PROP_SUCCESS) {
6723 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6724 MPTSAS_NUM_PHYS);
6725 mptsas_log(mpt, CE_WARN, "mptsas num phys"
6726 "prop update failed");
6727 return;
6728 }
6729 /*
6730 * Clear parent's attached-port props
6731 */
6732 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6733 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6734 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6735 DDI_PROP_SUCCESS) {
6736 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6737 SCSI_ADDR_PROP_ATTACHED_PORT);
6738 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6739 "prop update failed");
6740 return;
6741 }
6742 }
6743
6744 mutex_enter(&mpt->m_mutex);
6745 NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
6746 "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
6747 if (rval == DDI_SUCCESS) {
6748 refhash_remove(mpt->m_smp_targets, psmp);
6749 } else {
6750 psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
6751 }
6752
6753 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6754
6755 break;
6756 }
6757 default:
6758 return;
6759 }
6760 }
6761
6762 /*
6763 * Record the event if its type is enabled in mpt instance by ioctl.
6764 */
6765 static void
6766 mptsas_record_event(void *args)
6767 {
6768 m_replyh_arg_t *replyh_arg;
6769 pMpi2EventNotificationReply_t eventreply;
6770 uint32_t event, rfm;
6771 mptsas_t *mpt;
6772 int i, j;
6773 uint16_t event_data_len;
6774 boolean_t sendAEN = FALSE;
6775
6776 replyh_arg = (m_replyh_arg_t *)args;
6777 rfm = replyh_arg->rfm;
6778 mpt = replyh_arg->mpt;
6779
6780 eventreply = (pMpi2EventNotificationReply_t)
6781 (mpt->m_reply_frame + (rfm -
6782 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
6783 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6784
6785
6786 /*
6787 * Generate a system event to let anyone who cares know that a
6788 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6789 * event mask is set to.
6790 */
6791 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6792 sendAEN = TRUE;
6793 }
6794
6795 /*
6796 * Record the event only if it is not masked. Determine which dword
6797 * and bit of event mask to test.
6798 */
6799 i = (uint8_t)(event / 32);
6800 j = (uint8_t)(event % 32);
6801 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6802 i = mpt->m_event_index;
6803 mpt->m_events[i].Type = event;
6804 mpt->m_events[i].Number = ++mpt->m_event_number;
6805 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6806 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6807 &eventreply->EventDataLength);
6808
6809 if (event_data_len > 0) {
6810 /*
6811 * Limit data to size in m_event entry
6812 */
6813 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6814 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6815 }
6816 for (j = 0; j < event_data_len; j++) {
6817 mpt->m_events[i].Data[j] =
6818 ddi_get32(mpt->m_acc_reply_frame_hdl,
6819 &(eventreply->EventData[j]));
6820 }
6821
6822 /*
6823 * check for index wrap-around
6824 */
6825 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6826 i = 0;
6827 }
6828 mpt->m_event_index = (uint8_t)i;
6829
6830 /*
6831 * Set flag to send the event.
6832 */
6833 sendAEN = TRUE;
6834 }
6835 }
6836
6837 /*
6838 * Generate a system event if flag is set to let anyone who cares know
6839 * that an event has occurred.
6840 */
6841 if (sendAEN) {
6842 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6843 "SAS", NULL, NULL, DDI_NOSLEEP);
6844 }
6845 }
6846
6847 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6848 /*
6849 * handle sync events from ioc in interrupt
6850 * return value:
6851 * DDI_SUCCESS: The event is handled by this func
6852 * DDI_FAILURE: Event is not handled
6853 */
6854 static int
6855 mptsas_handle_event_sync(void *args)
6856 {
6857 m_replyh_arg_t *replyh_arg;
6858 pMpi2EventNotificationReply_t eventreply;
6859 uint32_t event, rfm;
6860 mptsas_t *mpt;
6861 uint_t iocstatus;
6862
6863 replyh_arg = (m_replyh_arg_t *)args;
6864 rfm = replyh_arg->rfm;
6865 mpt = replyh_arg->mpt;
6866
6867 ASSERT(mutex_owned(&mpt->m_mutex));
6868
6869 eventreply = (pMpi2EventNotificationReply_t)
6870 (mpt->m_reply_frame + (rfm -
6871 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
6872 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6873
6874 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6875 &eventreply->IOCStatus)) {
6876 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6877 mptsas_log(mpt, CE_WARN,
6878 "!mptsas_handle_event_sync: event 0x%x, "
6879 "IOCStatus=0x%x, "
6880 "IOCLogInfo=0x%x", event, iocstatus,
6881 ddi_get32(mpt->m_acc_reply_frame_hdl,
6882 &eventreply->IOCLogInfo));
6883 } else {
6884 mptsas_log(mpt, CE_WARN,
6885 "mptsas_handle_event_sync: event 0x%x, "
6886 "IOCStatus=0x%x, "
6887 "(IOCLogInfo=0x%x)", event, iocstatus,
6888 ddi_get32(mpt->m_acc_reply_frame_hdl,
6889 &eventreply->IOCLogInfo));
6890 }
6891 }
6892
6893 /*
6894 * figure out what kind of event we got and handle accordingly
6895 */
6896 switch (event) {
6897 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6898 {
6899 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
6900 uint8_t num_entries, expstatus, phy;
6901 uint8_t phystatus, physport, state, i;
6902 uint8_t start_phy_num, link_rate;
6903 uint16_t dev_handle, reason_code;
6904 uint16_t enc_handle, expd_handle;
6905 char string[80], curr[80], prev[80];
6906 mptsas_topo_change_list_t *topo_head = NULL;
6907 mptsas_topo_change_list_t *topo_tail = NULL;
6908 mptsas_topo_change_list_t *topo_node = NULL;
6909 mptsas_target_t *ptgt;
6910 mptsas_smp_t *psmp;
6911 uint8_t flags = 0, exp_flag;
6912 smhba_info_t *pSmhba = NULL;
6913
6914 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6915
6916 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6917 eventreply->EventData;
6918
6919 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6920 &sas_topo_change_list->EnclosureHandle);
6921 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6922 &sas_topo_change_list->ExpanderDevHandle);
6923 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6924 &sas_topo_change_list->NumEntries);
6925 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6926 &sas_topo_change_list->StartPhyNum);
6927 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6928 &sas_topo_change_list->ExpStatus);
6929 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6930 &sas_topo_change_list->PhysicalPort);
6931
6932 string[0] = 0;
6933 if (expd_handle) {
6934 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6935 switch (expstatus) {
6936 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6937 (void) sprintf(string, " added");
6938 /*
6939 * New expander device added
6940 */
6941 mpt->m_port_chng = 1;
6942 topo_node = kmem_zalloc(
6943 sizeof (mptsas_topo_change_list_t),
6944 KM_SLEEP);
6945 topo_node->mpt = mpt;
6946 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6947 topo_node->un.physport = physport;
6948 topo_node->devhdl = expd_handle;
6949 topo_node->flags = flags;
6950 topo_node->object = NULL;
6951 if (topo_head == NULL) {
6952 topo_head = topo_tail = topo_node;
6953 } else {
6954 topo_tail->next = topo_node;
6955 topo_tail = topo_node;
6956 }
6957 break;
6958 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6959 (void) sprintf(string, " not responding, "
6960 "removed");
6961 psmp = refhash_linear_search(mpt->m_smp_targets,
6962 mptsas_smp_eval_devhdl, &expd_handle);
6963 if (psmp == NULL)
6964 break;
6965
6966 topo_node = kmem_zalloc(
6967 sizeof (mptsas_topo_change_list_t),
6968 KM_SLEEP);
6969 topo_node->mpt = mpt;
6970 topo_node->un.phymask =
6971 psmp->m_addr.mta_phymask;
6972 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6973 topo_node->devhdl = expd_handle;
6974 topo_node->flags = flags;
6975 topo_node->object = NULL;
6976 if (topo_head == NULL) {
6977 topo_head = topo_tail = topo_node;
6978 } else {
6979 topo_tail->next = topo_node;
6980 topo_tail = topo_node;
6981 }
6982 break;
6983 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6984 break;
6985 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6986 (void) sprintf(string, " not responding, "
6987 "delaying removal");
6988 break;
6989 default:
6990 break;
6991 }
6992 } else {
6993 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6994 }
6995
6996 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6997 enc_handle, expd_handle, string));
6998 for (i = 0; i < num_entries; i++) {
6999 phy = i + start_phy_num;
7000 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
7001 &sas_topo_change_list->PHY[i].PhyStatus);
7002 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7003 &sas_topo_change_list->PHY[i].AttachedDevHandle);
7004 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
7005 /*
7006 * Filter out processing of Phy Vacant Status unless
7007 * the reason code is "Not Responding". Process all
7008 * other combinations of Phy Status and Reason Codes.
7009 */
7010 if ((phystatus &
7011 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
7012 (reason_code !=
7013 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
7014 continue;
7015 }
7016 curr[0] = 0;
7017 prev[0] = 0;
7018 string[0] = 0;
7019 switch (reason_code) {
7020 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7021 {
7022 NDBG20(("mptsas%d phy %d physical_port %d "
7023 "dev_handle %d added", mpt->m_instance, phy,
7024 physport, dev_handle));
7025 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7026 &sas_topo_change_list->PHY[i].LinkRate);
7027 state = (link_rate &
7028 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7029 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7030 switch (state) {
7031 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7032 (void) sprintf(curr, "is disabled");
7033 break;
7034 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7035 (void) sprintf(curr, "is offline, "
7036 "failed speed negotiation");
7037 break;
7038 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7039 (void) sprintf(curr, "SATA OOB "
7040 "complete");
7041 break;
7042 case SMP_RESET_IN_PROGRESS:
7043 (void) sprintf(curr, "SMP reset in "
7044 "progress");
7045 break;
7046 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7047 (void) sprintf(curr, "is online at "
7048 "1.5 Gbps");
7049 break;
7050 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7051 (void) sprintf(curr, "is online at 3.0 "
7052 "Gbps");
7053 break;
7054 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7055 (void) sprintf(curr, "is online at 6.0 "
7056 "Gbps");
7057 break;
7058 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7059 (void) sprintf(curr,
7060 "is online at 12.0 Gbps");
7061 break;
7062 default:
7063 (void) sprintf(curr, "state is "
7064 "unknown");
7065 break;
7066 }
7067 /*
7068 * New target device added into the system.
7069 * Set association flag according to if an
7070 * expander is used or not.
7071 */
7072 exp_flag =
7073 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7074 if (flags ==
7075 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7076 flags = exp_flag;
7077 }
7078 topo_node = kmem_zalloc(
7079 sizeof (mptsas_topo_change_list_t),
7080 KM_SLEEP);
7081 topo_node->mpt = mpt;
7082 topo_node->event =
7083 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7084 if (expd_handle == 0) {
7085 /*
7086 * Per MPI 2, if expander dev handle
7087 * is 0, it's a directly attached
7088 * device. So driver use PHY to decide
7089 * which iport is associated
7090 */
7091 physport = phy;
7092 mpt->m_port_chng = 1;
7093 }
7094 topo_node->un.physport = physport;
7095 topo_node->devhdl = dev_handle;
7096 topo_node->flags = flags;
7097 topo_node->object = NULL;
7098 if (topo_head == NULL) {
7099 topo_head = topo_tail = topo_node;
7100 } else {
7101 topo_tail->next = topo_node;
7102 topo_tail = topo_node;
7103 }
7104 break;
7105 }
7106 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7107 {
7108 NDBG20(("mptsas%d phy %d physical_port %d "
7109 "dev_handle %d removed", mpt->m_instance,
7110 phy, physport, dev_handle));
7111 /*
7112 * Set association flag according to if an
7113 * expander is used or not.
7114 */
7115 exp_flag =
7116 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7117 if (flags ==
7118 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7119 flags = exp_flag;
7120 }
7121 /*
7122 * Target device is removed from the system
7123 * Before the device is really offline from
7124 * from system.
7125 */
7126 ptgt = refhash_linear_search(mpt->m_targets,
7127 mptsas_target_eval_devhdl, &dev_handle);
7128 /*
7129 * If ptgt is NULL here, it means that the
7130 * DevHandle is not in the hash table. This is
7131 * reasonable sometimes. For example, if a
7132 * disk was pulled, then added, then pulled
7133 * again, the disk will not have been put into
7134 * the hash table because the add event will
7135 * have an invalid phymask. BUT, this does not
7136 * mean that the DevHandle is invalid. The
7137 * controller will still have a valid DevHandle
7138 * that must be removed. To do this, use the
7139 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
7140 */
7141 if (ptgt == NULL) {
7142 topo_node = kmem_zalloc(
7143 sizeof (mptsas_topo_change_list_t),
7144 KM_SLEEP);
7145 topo_node->mpt = mpt;
7146 topo_node->un.phymask = 0;
7147 topo_node->event =
7148 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7149 topo_node->devhdl = dev_handle;
7150 topo_node->flags = flags;
7151 topo_node->object = NULL;
7152 if (topo_head == NULL) {
7153 topo_head = topo_tail =
7154 topo_node;
7155 } else {
7156 topo_tail->next = topo_node;
7157 topo_tail = topo_node;
7158 }
7159 break;
7160 }
7161
7162 /*
7163 * Update DR flag immediately avoid I/O failure
7164 * before failover finish. Pay attention to the
7165 * mutex protect, we need grab m_tx_waitq_mutex
7166 * during set m_dr_flag because we won't add
7167 * the following command into waitq, instead,
7168 * we need return TRAN_BUSY in the tran_start
7169 * context.
7170 */
7171 mutex_enter(&mpt->m_tx_waitq_mutex);
7172 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7173 mutex_exit(&mpt->m_tx_waitq_mutex);
7174
7175 topo_node = kmem_zalloc(
7176 sizeof (mptsas_topo_change_list_t),
7177 KM_SLEEP);
7178 topo_node->mpt = mpt;
7179 topo_node->un.phymask =
7180 ptgt->m_addr.mta_phymask;
7181 topo_node->event =
7182 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7183 topo_node->devhdl = dev_handle;
7184 topo_node->flags = flags;
7185 topo_node->object = NULL;
7186 if (topo_head == NULL) {
7187 topo_head = topo_tail = topo_node;
7188 } else {
7189 topo_tail->next = topo_node;
7190 topo_tail = topo_node;
7191 }
7192 break;
7193 }
7194 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7195 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7196 &sas_topo_change_list->PHY[i].LinkRate);
7197 state = (link_rate &
7198 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7199 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7200 pSmhba = &mpt->m_phy_info[i].smhba_info;
7201 pSmhba->negotiated_link_rate = state;
7202 switch (state) {
7203 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7204 (void) sprintf(curr, "is disabled");
7205 mptsas_smhba_log_sysevent(mpt,
7206 ESC_SAS_PHY_EVENT,
7207 SAS_PHY_REMOVE,
7208 &mpt->m_phy_info[i].smhba_info);
7209 mpt->m_phy_info[i].smhba_info.
7210 negotiated_link_rate
7211 = 0x1;
7212 break;
7213 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7214 (void) sprintf(curr, "is offline, "
7215 "failed speed negotiation");
7216 mptsas_smhba_log_sysevent(mpt,
7217 ESC_SAS_PHY_EVENT,
7218 SAS_PHY_OFFLINE,
7219 &mpt->m_phy_info[i].smhba_info);
7220 break;
7221 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7222 (void) sprintf(curr, "SATA OOB "
7223 "complete");
7224 break;
7225 case SMP_RESET_IN_PROGRESS:
7226 (void) sprintf(curr, "SMP reset in "
7227 "progress");
7228 break;
7229 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7230 (void) sprintf(curr, "is online at "
7231 "1.5 Gbps");
7232 if ((expd_handle == 0) &&
7233 (enc_handle == 1)) {
7234 mpt->m_port_chng = 1;
7235 }
7236 mptsas_smhba_log_sysevent(mpt,
7237 ESC_SAS_PHY_EVENT,
7238 SAS_PHY_ONLINE,
7239 &mpt->m_phy_info[i].smhba_info);
7240 break;
7241 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7242 (void) sprintf(curr, "is online at 3.0 "
7243 "Gbps");
7244 if ((expd_handle == 0) &&
7245 (enc_handle == 1)) {
7246 mpt->m_port_chng = 1;
7247 }
7248 mptsas_smhba_log_sysevent(mpt,
7249 ESC_SAS_PHY_EVENT,
7250 SAS_PHY_ONLINE,
7251 &mpt->m_phy_info[i].smhba_info);
7252 break;
7253 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7254 (void) sprintf(curr, "is online at "
7255 "6.0 Gbps");
7256 if ((expd_handle == 0) &&
7257 (enc_handle == 1)) {
7258 mpt->m_port_chng = 1;
7259 }
7260 mptsas_smhba_log_sysevent(mpt,
7261 ESC_SAS_PHY_EVENT,
7262 SAS_PHY_ONLINE,
7263 &mpt->m_phy_info[i].smhba_info);
7264 break;
7265 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7266 (void) sprintf(curr, "is online at "
7267 "12.0 Gbps");
7268 if ((expd_handle == 0) &&
7269 (enc_handle == 1)) {
7270 mpt->m_port_chng = 1;
7271 }
7272 mptsas_smhba_log_sysevent(mpt,
7273 ESC_SAS_PHY_EVENT,
7274 SAS_PHY_ONLINE,
7275 &mpt->m_phy_info[i].smhba_info);
7276 break;
7277 default:
7278 (void) sprintf(curr, "state is "
7279 "unknown");
7280 break;
7281 }
7282
7283 state = (link_rate &
7284 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
7285 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
7286 switch (state) {
7287 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7288 (void) sprintf(prev, ", was disabled");
7289 break;
7290 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7291 (void) sprintf(prev, ", was offline, "
7292 "failed speed negotiation");
7293 break;
7294 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7295 (void) sprintf(prev, ", was SATA OOB "
7296 "complete");
7297 break;
7298 case SMP_RESET_IN_PROGRESS:
7299 (void) sprintf(prev, ", was SMP reset "
7300 "in progress");
7301 break;
7302 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7303 (void) sprintf(prev, ", was online at "
7304 "1.5 Gbps");
7305 break;
7306 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7307 (void) sprintf(prev, ", was online at "
7308 "3.0 Gbps");
7309 break;
7310 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7311 (void) sprintf(prev, ", was online at "
7312 "6.0 Gbps");
7313 break;
7314 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7315 (void) sprintf(prev, ", was online at "
7316 "12.0 Gbps");
7317 break;
7318 default:
7319 break;
7320 }
7321 (void) sprintf(&string[strlen(string)], "link "
7322 "changed, ");
7323 break;
7324 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7325 continue;
7326 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7327 (void) sprintf(&string[strlen(string)],
7328 "target not responding, delaying "
7329 "removal");
7330 break;
7331 }
7332 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7333 mpt->m_instance, phy, dev_handle, string, curr,
7334 prev));
7335 }
7336 if (topo_head != NULL) {
7337 /*
7338 * Launch DR taskq to handle topology change
7339 */
7340 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7341 mptsas_handle_dr, (void *)topo_head,
7342 DDI_NOSLEEP)) != DDI_SUCCESS) {
7343 while (topo_head != NULL) {
7344 topo_node = topo_head;
7345 topo_head = topo_head->next;
7346 kmem_free(topo_node,
7347 sizeof (mptsas_topo_change_list_t));
7348 }
7349 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7350 "for handle SAS DR event failed. \n");
7351 }
7352 }
7353 break;
7354 }
7355 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7356 {
7357 Mpi2EventDataIrConfigChangeList_t *irChangeList;
7358 mptsas_topo_change_list_t *topo_head = NULL;
7359 mptsas_topo_change_list_t *topo_tail = NULL;
7360 mptsas_topo_change_list_t *topo_node = NULL;
7361 mptsas_target_t *ptgt;
7362 uint8_t num_entries, i, reason;
7363 uint16_t volhandle, diskhandle;
7364
7365 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7366 eventreply->EventData;
7367 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7368 &irChangeList->NumElements);
7369
7370 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7371 mpt->m_instance));
7372
7373 for (i = 0; i < num_entries; i++) {
7374 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7375 &irChangeList->ConfigElement[i].ReasonCode);
7376 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7377 &irChangeList->ConfigElement[i].VolDevHandle);
7378 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7379 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7380
7381 switch (reason) {
7382 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7383 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7384 {
7385 NDBG20(("mptsas %d volume added\n",
7386 mpt->m_instance));
7387
7388 topo_node = kmem_zalloc(
7389 sizeof (mptsas_topo_change_list_t),
7390 KM_SLEEP);
7391
7392 topo_node->mpt = mpt;
7393 topo_node->event =
7394 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7395 topo_node->un.physport = 0xff;
7396 topo_node->devhdl = volhandle;
7397 topo_node->flags =
7398 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7399 topo_node->object = NULL;
7400 if (topo_head == NULL) {
7401 topo_head = topo_tail = topo_node;
7402 } else {
7403 topo_tail->next = topo_node;
7404 topo_tail = topo_node;
7405 }
7406 break;
7407 }
7408 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7409 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7410 {
7411 NDBG20(("mptsas %d volume deleted\n",
7412 mpt->m_instance));
7413 ptgt = refhash_linear_search(mpt->m_targets,
7414 mptsas_target_eval_devhdl, &volhandle);
7415 if (ptgt == NULL)
7416 break;
7417
7418 /*
7419 * Clear any flags related to volume
7420 */
7421 (void) mptsas_delete_volume(mpt, volhandle);
7422
7423 /*
7424 * Update DR flag immediately avoid I/O failure
7425 */
7426 mutex_enter(&mpt->m_tx_waitq_mutex);
7427 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7428 mutex_exit(&mpt->m_tx_waitq_mutex);
7429
7430 topo_node = kmem_zalloc(
7431 sizeof (mptsas_topo_change_list_t),
7432 KM_SLEEP);
7433 topo_node->mpt = mpt;
7434 topo_node->un.phymask =
7435 ptgt->m_addr.mta_phymask;
7436 topo_node->event =
7437 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7438 topo_node->devhdl = volhandle;
7439 topo_node->flags =
7440 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7441 topo_node->object = (void *)ptgt;
7442 if (topo_head == NULL) {
7443 topo_head = topo_tail = topo_node;
7444 } else {
7445 topo_tail->next = topo_node;
7446 topo_tail = topo_node;
7447 }
7448 break;
7449 }
7450 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7451 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7452 {
7453 ptgt = refhash_linear_search(mpt->m_targets,
7454 mptsas_target_eval_devhdl, &diskhandle);
7455 if (ptgt == NULL)
7456 break;
7457
7458 /*
7459 * Update DR flag immediately avoid I/O failure
7460 */
7461 mutex_enter(&mpt->m_tx_waitq_mutex);
7462 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7463 mutex_exit(&mpt->m_tx_waitq_mutex);
7464
7465 topo_node = kmem_zalloc(
7466 sizeof (mptsas_topo_change_list_t),
7467 KM_SLEEP);
7468 topo_node->mpt = mpt;
7469 topo_node->un.phymask =
7470 ptgt->m_addr.mta_phymask;
7471 topo_node->event =
7472 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7473 topo_node->devhdl = diskhandle;
7474 topo_node->flags =
7475 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7476 topo_node->object = (void *)ptgt;
7477 if (topo_head == NULL) {
7478 topo_head = topo_tail = topo_node;
7479 } else {
7480 topo_tail->next = topo_node;
7481 topo_tail = topo_node;
7482 }
7483 break;
7484 }
7485 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7486 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7487 {
7488 /*
7489 * The physical drive is released by a IR
7490 * volume. But we cannot get the the physport
7491 * or phynum from the event data, so we only
7492 * can get the physport/phynum after SAS
7493 * Device Page0 request for the devhdl.
7494 */
7495 topo_node = kmem_zalloc(
7496 sizeof (mptsas_topo_change_list_t),
7497 KM_SLEEP);
7498 topo_node->mpt = mpt;
7499 topo_node->un.phymask = 0;
7500 topo_node->event =
7501 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7502 topo_node->devhdl = diskhandle;
7503 topo_node->flags =
7504 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7505 topo_node->object = NULL;
7506 mpt->m_port_chng = 1;
7507 if (topo_head == NULL) {
7508 topo_head = topo_tail = topo_node;
7509 } else {
7510 topo_tail->next = topo_node;
7511 topo_tail = topo_node;
7512 }
7513 break;
7514 }
7515 default:
7516 break;
7517 }
7518 }
7519
7520 if (topo_head != NULL) {
7521 /*
7522 * Launch DR taskq to handle topology change
7523 */
7524 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7525 mptsas_handle_dr, (void *)topo_head,
7526 DDI_NOSLEEP)) != DDI_SUCCESS) {
7527 while (topo_head != NULL) {
7528 topo_node = topo_head;
7529 topo_head = topo_head->next;
7530 kmem_free(topo_node,
7531 sizeof (mptsas_topo_change_list_t));
7532 }
7533 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7534 "for handle SAS DR event failed. \n");
7535 }
7536 }
7537 break;
7538 }
7539 default:
7540 return (DDI_FAILURE);
7541 }
7542
7543 return (DDI_SUCCESS);
7544 }
7545
7546 /*
7547 * handle events from ioc
7548 */
7549 static void
7550 mptsas_handle_event(void *args)
7551 {
7552 m_replyh_arg_t *replyh_arg;
7553 pMpi2EventNotificationReply_t eventreply;
7554 uint32_t event, iocloginfo, rfm;
7555 uint32_t status;
7556 uint8_t port;
7557 mptsas_t *mpt;
7558 uint_t iocstatus;
7559
7560 replyh_arg = (m_replyh_arg_t *)args;
7561 rfm = replyh_arg->rfm;
7562 mpt = replyh_arg->mpt;
7563
7564 mutex_enter(&mpt->m_mutex);
7565 /*
7566 * If HBA is being reset, drop incoming event.
7567 */
7568 if (mpt->m_in_reset) {
7569 NDBG20(("dropping event received prior to reset"));
7570 mutex_exit(&mpt->m_mutex);
7571 return;
7572 }
7573
7574 eventreply = (pMpi2EventNotificationReply_t)
7575 (mpt->m_reply_frame + (rfm -
7576 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7577 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7578
7579 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7580 &eventreply->IOCStatus)) {
7581 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7582 mptsas_log(mpt, CE_WARN,
7583 "!mptsas_handle_event: IOCStatus=0x%x, "
7584 "IOCLogInfo=0x%x", iocstatus,
7585 ddi_get32(mpt->m_acc_reply_frame_hdl,
7586 &eventreply->IOCLogInfo));
7587 } else {
7588 mptsas_log(mpt, CE_WARN,
7589 "mptsas_handle_event: IOCStatus=0x%x, "
7590 "IOCLogInfo=0x%x", iocstatus,
7591 ddi_get32(mpt->m_acc_reply_frame_hdl,
7592 &eventreply->IOCLogInfo));
7593 }
7594 }
7595
7596 /*
7597 * figure out what kind of event we got and handle accordingly
7598 */
7599 switch (event) {
7600 case MPI2_EVENT_LOG_ENTRY_ADDED:
7601 break;
7602 case MPI2_EVENT_LOG_DATA:
7603 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7604 &eventreply->IOCLogInfo);
7605 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7606 iocloginfo));
7607 break;
7608 case MPI2_EVENT_STATE_CHANGE:
7609 NDBG20(("mptsas%d state change.", mpt->m_instance));
7610 break;
7611 case MPI2_EVENT_HARD_RESET_RECEIVED:
7612 NDBG20(("mptsas%d event change.", mpt->m_instance));
7613 break;
7614 case MPI2_EVENT_SAS_DISCOVERY:
7615 {
7616 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7617 char string[80];
7618 uint8_t rc;
7619
7620 sasdiscovery =
7621 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7622
7623 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7624 &sasdiscovery->ReasonCode);
7625 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7626 &sasdiscovery->PhysicalPort);
7627 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7628 &sasdiscovery->DiscoveryStatus);
7629
7630 string[0] = 0;
7631 switch (rc) {
7632 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7633 (void) sprintf(string, "STARTING");
7634 break;
7635 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7636 (void) sprintf(string, "COMPLETED");
7637 break;
7638 default:
7639 (void) sprintf(string, "UNKNOWN");
7640 break;
7641 }
7642
7643 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7644 port, status));
7645
7646 break;
7647 }
7648 case MPI2_EVENT_EVENT_CHANGE:
7649 NDBG20(("mptsas%d event change.", mpt->m_instance));
7650 break;
7651 case MPI2_EVENT_TASK_SET_FULL:
7652 {
7653 pMpi2EventDataTaskSetFull_t taskfull;
7654
7655 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7656
7657 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7658 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7659 &taskfull->CurrentDepth)));
7660 break;
7661 }
7662 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7663 {
7664 /*
7665 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7666 * in mptsas_handle_event_sync() of interrupt context
7667 */
7668 break;
7669 }
7670 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7671 {
7672 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7673 uint8_t rc;
7674 char string[80];
7675
7676 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7677 eventreply->EventData;
7678
7679 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7680 &encstatus->ReasonCode);
7681 switch (rc) {
7682 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7683 (void) sprintf(string, "added");
7684 break;
7685 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7686 (void) sprintf(string, ", not responding");
7687 break;
7688 default:
7689 break;
7690 }
7691 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure "
7692 "%x%s\n", mpt->m_instance,
7693 ddi_get16(mpt->m_acc_reply_frame_hdl,
7694 &encstatus->EnclosureHandle), string));
7695 break;
7696 }
7697
7698 /*
7699 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7700 * mptsas_handle_event_sync,in here just send ack message.
7701 */
7702 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7703 {
7704 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7705 uint8_t rc;
7706 uint16_t devhdl;
7707 uint64_t wwn = 0;
7708 uint32_t wwn_lo, wwn_hi;
7709
7710 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7711 eventreply->EventData;
7712 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7713 &statuschange->ReasonCode);
7714 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7715 (uint32_t *)(void *)&statuschange->SASAddress);
7716 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7717 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7718 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7719 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7720 &statuschange->DevHandle);
7721
7722 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7723 wwn));
7724
7725 switch (rc) {
7726 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7727 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7728 ddi_get8(mpt->m_acc_reply_frame_hdl,
7729 &statuschange->ASC),
7730 ddi_get8(mpt->m_acc_reply_frame_hdl,
7731 &statuschange->ASCQ)));
7732 break;
7733
7734 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7735 NDBG20(("Device not supported"));
7736 break;
7737
7738 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7739 NDBG20(("IOC internally generated the Target Reset "
7740 "for devhdl:%x", devhdl));
7741 break;
7742
7743 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7744 NDBG20(("IOC's internally generated Target Reset "
7745 "completed for devhdl:%x", devhdl));
7746 break;
7747
7748 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7749 NDBG20(("IOC internally generated Abort Task"));
7750 break;
7751
7752 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7753 NDBG20(("IOC's internally generated Abort Task "
7754 "completed"));
7755 break;
7756
7757 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7758 NDBG20(("IOC internally generated Abort Task Set"));
7759 break;
7760
7761 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7762 NDBG20(("IOC internally generated Clear Task Set"));
7763 break;
7764
7765 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7766 NDBG20(("IOC internally generated Query Task"));
7767 break;
7768
7769 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7770 NDBG20(("Device sent an Asynchronous Notification"));
7771 break;
7772
7773 default:
7774 break;
7775 }
7776 break;
7777 }
7778 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7779 {
7780 /*
7781 * IR TOPOLOGY CHANGE LIST Event has already been handled
7782 * in mpt_handle_event_sync() of interrupt context
7783 */
7784 break;
7785 }
7786 case MPI2_EVENT_IR_OPERATION_STATUS:
7787 {
7788 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7789 char reason_str[80];
7790 uint8_t rc, percent;
7791 uint16_t handle;
7792
7793 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7794 eventreply->EventData;
7795 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7796 &irOpStatus->RAIDOperation);
7797 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7798 &irOpStatus->PercentComplete);
7799 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7800 &irOpStatus->VolDevHandle);
7801
7802 switch (rc) {
7803 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7804 (void) sprintf(reason_str, "resync");
7805 break;
7806 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7807 (void) sprintf(reason_str, "online capacity "
7808 "expansion");
7809 break;
7810 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7811 (void) sprintf(reason_str, "consistency check");
7812 break;
7813 default:
7814 (void) sprintf(reason_str, "unknown reason %x",
7815 rc);
7816 }
7817
7818 NDBG20(("mptsas%d raid operational status: (%s)"
7819 "\thandle(0x%04x), percent complete(%d)\n",
7820 mpt->m_instance, reason_str, handle, percent));
7821 break;
7822 }
7823 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7824 {
7825 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7826 uint8_t phy_num;
7827 uint8_t primitive;
7828
7829 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7830 eventreply->EventData;
7831
7832 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7833 &sas_broadcast->PhyNum);
7834 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7835 &sas_broadcast->Primitive);
7836
7837 switch (primitive) {
7838 case MPI2_EVENT_PRIMITIVE_CHANGE:
7839 mptsas_smhba_log_sysevent(mpt,
7840 ESC_SAS_HBA_PORT_BROADCAST,
7841 SAS_PORT_BROADCAST_CHANGE,
7842 &mpt->m_phy_info[phy_num].smhba_info);
7843 break;
7844 case MPI2_EVENT_PRIMITIVE_SES:
7845 mptsas_smhba_log_sysevent(mpt,
7846 ESC_SAS_HBA_PORT_BROADCAST,
7847 SAS_PORT_BROADCAST_SES,
7848 &mpt->m_phy_info[phy_num].smhba_info);
7849 break;
7850 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7851 mptsas_smhba_log_sysevent(mpt,
7852 ESC_SAS_HBA_PORT_BROADCAST,
7853 SAS_PORT_BROADCAST_D01_4,
7854 &mpt->m_phy_info[phy_num].smhba_info);
7855 break;
7856 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7857 mptsas_smhba_log_sysevent(mpt,
7858 ESC_SAS_HBA_PORT_BROADCAST,
7859 SAS_PORT_BROADCAST_D04_7,
7860 &mpt->m_phy_info[phy_num].smhba_info);
7861 break;
7862 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7863 mptsas_smhba_log_sysevent(mpt,
7864 ESC_SAS_HBA_PORT_BROADCAST,
7865 SAS_PORT_BROADCAST_D16_7,
7866 &mpt->m_phy_info[phy_num].smhba_info);
7867 break;
7868 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7869 mptsas_smhba_log_sysevent(mpt,
7870 ESC_SAS_HBA_PORT_BROADCAST,
7871 SAS_PORT_BROADCAST_D29_7,
7872 &mpt->m_phy_info[phy_num].smhba_info);
7873 break;
7874 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7875 mptsas_smhba_log_sysevent(mpt,
7876 ESC_SAS_HBA_PORT_BROADCAST,
7877 SAS_PORT_BROADCAST_D24_0,
7878 &mpt->m_phy_info[phy_num].smhba_info);
7879 break;
7880 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7881 mptsas_smhba_log_sysevent(mpt,
7882 ESC_SAS_HBA_PORT_BROADCAST,
7883 SAS_PORT_BROADCAST_D27_4,
7884 &mpt->m_phy_info[phy_num].smhba_info);
7885 break;
7886 default:
7887 NDBG16(("mptsas%d: unknown BROADCAST PRIMITIVE"
7888 " %x received",
7889 mpt->m_instance, primitive));
7890 break;
7891 }
7892 NDBG16(("mptsas%d sas broadcast primitive: "
7893 "\tprimitive(0x%04x), phy(%d) complete\n",
7894 mpt->m_instance, primitive, phy_num));
7895 break;
7896 }
7897 case MPI2_EVENT_IR_VOLUME:
7898 {
7899 Mpi2EventDataIrVolume_t *irVolume;
7900 uint16_t devhandle;
7901 uint32_t state;
7902 int config, vol;
7903 uint8_t found = FALSE;
7904
7905 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7906 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7907 &irVolume->NewValue);
7908 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7909 &irVolume->VolDevHandle);
7910
7911 NDBG20(("EVENT_IR_VOLUME event is received"));
7912
7913 /*
7914 * Get latest RAID info and then find the DevHandle for this
7915 * event in the configuration. If the DevHandle is not found
7916 * just exit the event.
7917 */
7918 (void) mptsas_get_raid_info(mpt);
7919 for (config = 0; (config < mpt->m_num_raid_configs) &&
7920 (!found); config++) {
7921 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7922 if (mpt->m_raidconfig[config].m_raidvol[vol].
7923 m_raidhandle == devhandle) {
7924 found = TRUE;
7925 break;
7926 }
7927 }
7928 }
7929 if (!found) {
7930 break;
7931 }
7932
7933 switch (irVolume->ReasonCode) {
7934 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7935 {
7936 uint32_t i;
7937 mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
7938 state;
7939
7940 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7941 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7942 ", auto-config of hot-swap drives is %s"
7943 ", write caching is %s"
7944 ", hot-spare pool mask is %02x\n",
7945 vol, state &
7946 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7947 ? "disabled" : "enabled",
7948 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7949 ? "controlled by member disks" :
7950 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7951 ? "disabled" :
7952 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7953 ? "enabled" :
7954 "incorrectly set",
7955 (state >> 16) & 0xff);
7956 break;
7957 }
7958 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7959 {
7960 mpt->m_raidconfig[config].m_raidvol[vol].m_state =
7961 (uint8_t)state;
7962
7963 mptsas_log(mpt, CE_NOTE,
7964 "Volume %d is now %s\n", vol,
7965 state == MPI2_RAID_VOL_STATE_OPTIMAL
7966 ? "optimal" :
7967 state == MPI2_RAID_VOL_STATE_DEGRADED
7968 ? "degraded" :
7969 state == MPI2_RAID_VOL_STATE_ONLINE
7970 ? "online" :
7971 state == MPI2_RAID_VOL_STATE_INITIALIZING
7972 ? "initializing" :
7973 state == MPI2_RAID_VOL_STATE_FAILED
7974 ? "failed" :
7975 state == MPI2_RAID_VOL_STATE_MISSING
7976 ? "missing" :
7977 "state unknown");
7978 break;
7979 }
7980 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7981 {
7982 mpt->m_raidconfig[config].m_raidvol[vol].
7983 m_statusflags = state;
7984
7985 mptsas_log(mpt, CE_NOTE,
7986 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7987 vol,
7988 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7989 ? ", enabled" : ", disabled",
7990 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7991 ? ", quiesced" : "",
7992 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7993 ? ", inactive" : ", active",
7994 state &
7995 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7996 ? ", bad block table is full" : "",
7997 state &
7998 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7999 ? ", resync in progress" : "",
8000 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
8001 ? ", background initialization in progress" : "",
8002 state &
8003 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
8004 ? ", capacity expansion in progress" : "",
8005 state &
8006 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
8007 ? ", consistency check in progress" : "",
8008 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
8009 ? ", data scrub in progress" : "");
8010 break;
8011 }
8012 default:
8013 break;
8014 }
8015 break;
8016 }
8017 case MPI2_EVENT_IR_PHYSICAL_DISK:
8018 {
8019 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
8020 uint16_t devhandle, enchandle, slot;
8021 uint32_t status, state;
8022 uint8_t physdisknum, reason;
8023
8024 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
8025 eventreply->EventData;
8026 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
8027 &irPhysDisk->PhysDiskNum);
8028 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8029 &irPhysDisk->PhysDiskDevHandle);
8030 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8031 &irPhysDisk->EnclosureHandle);
8032 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
8033 &irPhysDisk->Slot);
8034 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
8035 &irPhysDisk->NewValue);
8036 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8037 &irPhysDisk->ReasonCode);
8038
8039 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
8040
8041 switch (reason) {
8042 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
8043 mptsas_log(mpt, CE_NOTE,
8044 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8045 "for enclosure with handle 0x%x is now in hot "
8046 "spare pool %d",
8047 physdisknum, devhandle, slot, enchandle,
8048 (state >> 16) & 0xff);
8049 break;
8050
8051 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
8052 status = state;
8053 mptsas_log(mpt, CE_NOTE,
8054 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8055 "for enclosure with handle 0x%x is now "
8056 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
8057 enchandle,
8058 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
8059 ? ", inactive" : ", active",
8060 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
8061 ? ", out of sync" : "",
8062 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
8063 ? ", quiesced" : "",
8064 status &
8065 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
8066 ? ", write cache enabled" : "",
8067 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
8068 ? ", capacity expansion target" : "");
8069 break;
8070
8071 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
8072 mptsas_log(mpt, CE_NOTE,
8073 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8074 "for enclosure with handle 0x%x is now %s\n",
8075 physdisknum, devhandle, slot, enchandle,
8076 state == MPI2_RAID_PD_STATE_OPTIMAL
8077 ? "optimal" :
8078 state == MPI2_RAID_PD_STATE_REBUILDING
8079 ? "rebuilding" :
8080 state == MPI2_RAID_PD_STATE_DEGRADED
8081 ? "degraded" :
8082 state == MPI2_RAID_PD_STATE_HOT_SPARE
8083 ? "a hot spare" :
8084 state == MPI2_RAID_PD_STATE_ONLINE
8085 ? "online" :
8086 state == MPI2_RAID_PD_STATE_OFFLINE
8087 ? "offline" :
8088 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
8089 ? "not compatible" :
8090 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
8091 ? "not configured" :
8092 "state unknown");
8093 break;
8094 }
8095 break;
8096 }
8097 default:
8098 NDBG20(("mptsas%d: unknown event %x received",
8099 mpt->m_instance, event));
8100 break;
8101 }
8102
8103 /*
8104 * Return the reply frame to the free queue.
8105 */
8106 ddi_put32(mpt->m_acc_free_queue_hdl,
8107 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
8108 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
8109 DDI_DMA_SYNC_FORDEV);
8110 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
8111 mpt->m_free_index = 0;
8112 }
8113 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
8114 mpt->m_free_index);
8115 mutex_exit(&mpt->m_mutex);
8116 }
8117
8118 /*
8119 * invoked from timeout() to restart qfull cmds with throttle == 0
8120 */
8121 static void
8122 mptsas_restart_cmd(void *arg)
8123 {
8124 mptsas_t *mpt = arg;
8125 mptsas_target_t *ptgt = NULL;
8126
8127 mutex_enter(&mpt->m_mutex);
8128
8129 mpt->m_restart_cmd_timeid = 0;
8130
8131 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8132 ptgt = refhash_next(mpt->m_targets, ptgt)) {
8133 if (ptgt->m_reset_delay == 0) {
8134 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
8135 mptsas_set_throttle(mpt, ptgt,
8136 MAX_THROTTLE);
8137 }
8138 }
8139 }
8140 mptsas_restart_hba(mpt);
8141 mutex_exit(&mpt->m_mutex);
8142 }
8143
8144 void
8145 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8146 {
8147 int slot;
8148 mptsas_slots_t *slots = mpt->m_active;
8149 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8150
8151 ASSERT(cmd != NULL);
8152 ASSERT(cmd->cmd_queued == FALSE);
8153
8154 /*
8155 * Task Management cmds are removed in their own routines. Also,
8156 * we don't want to modify timeout based on TM cmds.
8157 */
8158 if (cmd->cmd_flags & CFLAG_TM_CMD) {
8159 return;
8160 }
8161
8162 slot = cmd->cmd_slot;
8163
8164 /*
8165 * remove the cmd.
8166 */
8167 if (cmd == slots->m_slot[slot]) {
8168 NDBG31(("mptsas_remove_cmd: removing cmd=0x%p, flags "
8169 "0x%x", (void *)cmd, cmd->cmd_flags));
8170 slots->m_slot[slot] = NULL;
8171 mpt->m_ncmds--;
8172
8173 /*
8174 * only decrement per target ncmds if command
8175 * has a target associated with it.
8176 */
8177 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
8178 ptgt->m_t_ncmds--;
8179 /*
8180 * reset throttle if we just ran an untagged command
8181 * to a tagged target
8182 */
8183 if ((ptgt->m_t_ncmds == 0) &&
8184 ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
8185 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8186 }
8187
8188 /*
8189 * Remove this command from the active queue.
8190 */
8191 if (cmd->cmd_active_expiration != 0) {
8192 TAILQ_REMOVE(&ptgt->m_active_cmdq, cmd,
8193 cmd_active_link);
8194 cmd->cmd_active_expiration = 0;
8195 }
8196 }
8197 }
8198
8199 /*
8200 * This is all we need to do for ioc commands.
8201 */
8202 if (cmd->cmd_flags & CFLAG_CMDIOC) {
8203 mptsas_return_to_pool(mpt, cmd);
8204 return;
8205 }
8206
8207 ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
8208 }
8209
8210 /*
8211 * accept all cmds on the tx_waitq if any and then
8212 * start a fresh request from the top of the device queue.
8213 *
8214 * since there are always cmds queued on the tx_waitq, and rare cmds on
8215 * the instance waitq, so this function should not be invoked in the ISR,
8216 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
8217 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
8218 */
8219 static void
8220 mptsas_restart_hba(mptsas_t *mpt)
8221 {
8222 ASSERT(mutex_owned(&mpt->m_mutex));
8223
8224 mutex_enter(&mpt->m_tx_waitq_mutex);
8225 if (mpt->m_tx_waitq) {
8226 mptsas_accept_tx_waitq(mpt);
8227 }
8228 mutex_exit(&mpt->m_tx_waitq_mutex);
8229 mptsas_restart_waitq(mpt);
8230 }
8231
8232 /*
8233 * start a fresh request from the top of the device queue
8234 */
8235 static void
8236 mptsas_restart_waitq(mptsas_t *mpt)
8237 {
8238 mptsas_cmd_t *cmd, *next_cmd;
8239 mptsas_target_t *ptgt = NULL;
8240
8241 NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
8242
8243 ASSERT(mutex_owned(&mpt->m_mutex));
8244
8245 /*
8246 * If there is a reset delay, don't start any cmds. Otherwise, start
8247 * as many cmds as possible.
8248 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
8249 * commands is m_max_requests - 2.
8250 */
8251 cmd = mpt->m_waitq;
8252
8253 while (cmd != NULL) {
8254 next_cmd = cmd->cmd_linkp;
8255 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
8256 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8257 /*
8258 * passthru command get slot need
8259 * set CFLAG_PREPARED.
8260 */
8261 cmd->cmd_flags |= CFLAG_PREPARED;
8262 mptsas_waitq_delete(mpt, cmd);
8263 mptsas_start_passthru(mpt, cmd);
8264 }
8265 cmd = next_cmd;
8266 continue;
8267 }
8268 if (cmd->cmd_flags & CFLAG_CONFIG) {
8269 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8270 /*
8271 * Send the config page request and delete it
8272 * from the waitq.
8273 */
8274 cmd->cmd_flags |= CFLAG_PREPARED;
8275 mptsas_waitq_delete(mpt, cmd);
8276 mptsas_start_config_page_access(mpt, cmd);
8277 }
8278 cmd = next_cmd;
8279 continue;
8280 }
8281 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
8282 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8283 /*
8284 * Send the FW Diag request and delete if from
8285 * the waitq.
8286 */
8287 cmd->cmd_flags |= CFLAG_PREPARED;
8288 mptsas_waitq_delete(mpt, cmd);
8289 mptsas_start_diag(mpt, cmd);
8290 }
8291 cmd = next_cmd;
8292 continue;
8293 }
8294
8295 ptgt = cmd->cmd_tgt_addr;
8296 if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
8297 (ptgt->m_t_ncmds == 0)) {
8298 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8299 }
8300 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
8301 (ptgt && (ptgt->m_reset_delay == 0)) &&
8302 (ptgt && (ptgt->m_t_ncmds <
8303 ptgt->m_t_throttle))) {
8304 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8305 mptsas_waitq_delete(mpt, cmd);
8306 (void) mptsas_start_cmd(mpt, cmd);
8307 }
8308 }
8309 cmd = next_cmd;
8310 }
8311 }
8312 /*
8313 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
8314 * Accept all those queued cmds before new cmd is accept so that the
8315 * cmds are sent in order.
8316 */
8317 static void
8318 mptsas_accept_tx_waitq(mptsas_t *mpt)
8319 {
8320 mptsas_cmd_t *cmd;
8321
8322 ASSERT(mutex_owned(&mpt->m_mutex));
8323 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
8324
8325 /*
8326 * A Bus Reset could occur at any time and flush the tx_waitq,
8327 * so we cannot count on the tx_waitq to contain even one cmd.
8328 * And when the m_tx_waitq_mutex is released and run
8329 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8330 */
8331 cmd = mpt->m_tx_waitq;
8332 for (;;) {
8333 if ((cmd = mpt->m_tx_waitq) == NULL) {
8334 mpt->m_tx_draining = 0;
8335 break;
8336 }
8337 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8338 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8339 }
8340 cmd->cmd_linkp = NULL;
8341 mutex_exit(&mpt->m_tx_waitq_mutex);
8342 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8343 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8344 "to accept cmd on queue\n");
8345 mutex_enter(&mpt->m_tx_waitq_mutex);
8346 }
8347 }
8348
8349
8350 /*
8351 * mpt tag type lookup
8352 */
8353 static char mptsas_tag_lookup[] =
8354 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8355
8356 static int
8357 mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8358 {
8359 struct scsi_pkt *pkt = CMD2PKT(cmd);
8360 uint32_t control = 0;
8361 caddr_t mem, arsbuf;
8362 pMpi2SCSIIORequest_t io_request;
8363 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
8364 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
8365 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8366 uint16_t SMID, io_flags = 0;
8367 uint8_t ars_size;
8368 uint64_t request_desc;
8369 uint32_t ars_dmaaddrlow;
8370 mptsas_cmd_t *c;
8371
8372 NDBG1(("mptsas_start_cmd: cmd=0x%p, flags 0x%x", (void *)cmd,
8373 cmd->cmd_flags));
8374
8375 /*
8376 * Set SMID and increment index. Rollover to 1 instead of 0 if index
8377 * is at the max. 0 is an invalid SMID, so we call the first index 1.
8378 */
8379 SMID = cmd->cmd_slot;
8380
8381 /*
8382 * It is possible for back to back device reset to
8383 * happen before the reset delay has expired. That's
8384 * ok, just let the device reset go out on the bus.
8385 */
8386 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8387 ASSERT(ptgt->m_reset_delay == 0);
8388 }
8389
8390 /*
8391 * if a non-tagged cmd is submitted to an active tagged target
8392 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
8393 * to be untagged
8394 */
8395 if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
8396 (ptgt->m_t_ncmds > 1) &&
8397 ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
8398 (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
8399 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8400 NDBG23(("target=%d, untagged cmd, start draining\n",
8401 ptgt->m_devhdl));
8402
8403 if (ptgt->m_reset_delay == 0) {
8404 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
8405 }
8406
8407 mptsas_remove_cmd(mpt, cmd);
8408 cmd->cmd_pkt_flags |= FLAG_HEAD;
8409 mptsas_waitq_add(mpt, cmd);
8410 }
8411 return (DDI_FAILURE);
8412 }
8413
8414 /*
8415 * Set correct tag bits.
8416 */
8417 if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
8418 switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
8419 FLAG_TAGMASK) >> 12)]) {
8420 case MSG_SIMPLE_QTAG:
8421 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8422 break;
8423 case MSG_HEAD_QTAG:
8424 control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
8425 break;
8426 case MSG_ORDERED_QTAG:
8427 control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
8428 break;
8429 default:
8430 mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
8431 break;
8432 }
8433 } else {
8434 if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
8435 ptgt->m_t_throttle = 1;
8436 }
8437 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8438 }
8439
8440 if (cmd->cmd_pkt_flags & FLAG_TLR) {
8441 control |= MPI2_SCSIIO_CONTROL_TLR_ON;
8442 }
8443
8444 mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
8445 io_request = (pMpi2SCSIIORequest_t)mem;
8446 if (cmd->cmd_extrqslen != 0) {
8447 /*
8448 * Mapping of the buffer was done in mptsas_pkt_alloc_extern().
8449 * Calculate the DMA address with the same offset.
8450 */
8451 arsbuf = cmd->cmd_arq_buf;
8452 ars_size = cmd->cmd_extrqslen;
8453 ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
8454 ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
8455 0xffffffffu;
8456 } else {
8457 arsbuf = mpt->m_req_sense + (mpt->m_req_sense_size * (SMID-1));
8458 cmd->cmd_arq_buf = arsbuf;
8459 ars_size = mpt->m_req_sense_size;
8460 ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
8461 (mpt->m_req_sense_size * (SMID-1))) &
8462 0xffffffffu;
8463 }
8464 bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
8465 bzero(arsbuf, ars_size);
8466
8467 ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
8468 (MPI2_SCSI_IO_REQUEST, SGL) / 4);
8469 mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
8470 MPI2_FUNCTION_SCSI_IO_REQUEST);
8471
8472 (void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
8473 io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);
8474
8475 io_flags = cmd->cmd_cdblen;
8476 if (mptsas_use_fastpath &&
8477 ptgt->m_io_flags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
8478 io_flags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
8479 request_desc = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
8480 } else {
8481 request_desc = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
8482 }
8483 ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
8484 /*
8485 * setup the Scatter/Gather DMA list for this request
8486 */
8487 if (cmd->cmd_cookiec > 0) {
8488 mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
8489 } else {
8490 ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
8491 ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
8492 MPI2_SGE_FLAGS_END_OF_BUFFER |
8493 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
8494 MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
8495 }
8496
8497 /*
8498 * save ARQ information
8499 */
8500 ddi_put8(acc_hdl, &io_request->SenseBufferLength, ars_size);
8501 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress, ars_dmaaddrlow);
8502
8503 ddi_put32(acc_hdl, &io_request->Control, control);
8504
8505 NDBG31(("starting message=%d(0x%p), with cmd=0x%p",
8506 SMID, (void *)io_request, (void *)cmd));
8507
8508 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8509 (void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
8510 DDI_DMA_SYNC_FORDEV);
8511
8512 /*
8513 * Build request descriptor and write it to the request desc post reg.
8514 */
8515 request_desc |= (SMID << 16);
8516 request_desc |= (uint64_t)ptgt->m_devhdl << 48;
8517 MPTSAS_START_CMD(mpt, request_desc);
8518
8519 /*
8520 * Start timeout.
8521 */
8522 cmd->cmd_active_expiration =
8523 gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
8524 #ifdef MPTSAS_TEST
8525 /*
8526 * Force timeouts to happen immediately.
8527 */
8528 if (mptsas_test_timeouts)
8529 cmd->cmd_active_expiration = gethrtime();
8530 #endif
8531 c = TAILQ_FIRST(&ptgt->m_active_cmdq);
8532 if (c == NULL ||
8533 c->cmd_active_expiration < cmd->cmd_active_expiration) {
8534 /*
8535 * Common case is that this is the last pending expiration
8536 * (or queue is empty). Insert at head of the queue.
8537 */
8538 TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
8539 } else {
8540 /*
8541 * Queue is not empty and first element expires later than
8542 * this command. Search for element expiring sooner.
8543 */
8544 while ((c = TAILQ_NEXT(c, cmd_active_link)) != NULL) {
8545 if (c->cmd_active_expiration <
8546 cmd->cmd_active_expiration) {
8547 TAILQ_INSERT_BEFORE(c, cmd, cmd_active_link);
8548 break;
8549 }
8550 }
8551 if (c == NULL) {
8552 /*
8553 * No element found expiring sooner, append to
8554 * non-empty queue.
8555 */
8556 TAILQ_INSERT_TAIL(&ptgt->m_active_cmdq, cmd,
8557 cmd_active_link);
8558 }
8559 }
8560
8561 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8562 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8563 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8564 return (DDI_FAILURE);
8565 }
8566 return (DDI_SUCCESS);
8567 }
8568
8569 /*
8570 * Select a helper thread to handle current doneq
8571 */
8572 static void
8573 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8574 {
8575 uint64_t t, i;
8576 uint32_t min = 0xffffffff;
8577 mptsas_doneq_thread_list_t *item;
8578
8579 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8580 item = &mpt->m_doneq_thread_id[i];
8581 /*
8582 * If the completed command on help thread[i] less than
8583 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8584 * pick a thread which has least completed command.
8585 */
8586
8587 mutex_enter(&item->mutex);
8588 if (item->len < mpt->m_doneq_thread_threshold) {
8589 t = i;
8590 mutex_exit(&item->mutex);
8591 break;
8592 }
8593 if (item->len < min) {
8594 min = item->len;
8595 t = i;
8596 }
8597 mutex_exit(&item->mutex);
8598 }
8599 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8600 mptsas_doneq_mv(mpt, t);
8601 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8602 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8603 }
8604
8605 /*
8606 * move the current global doneq to the doneq of thead[t]
8607 */
8608 static void
8609 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8610 {
8611 mptsas_cmd_t *cmd;
8612 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8613
8614 ASSERT(mutex_owned(&item->mutex));
8615 while ((cmd = mpt->m_doneq) != NULL) {
8616 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8617 mpt->m_donetail = &mpt->m_doneq;
8618 }
8619 cmd->cmd_linkp = NULL;
8620 *item->donetail = cmd;
8621 item->donetail = &cmd->cmd_linkp;
8622 mpt->m_doneq_len--;
8623 item->len++;
8624 }
8625 }
8626
/*
 * FMA audit for a completed command: check every controller access and
 * DMA handle the command may have touched.  On any failure, report the
 * service impact (DDI_SERVICE_UNAFFECTED) and fail the packet back to
 * the target driver with CMD_TRAN_ERR.
 */
void
mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	/* Check all acc and dma handles */
	/* 1) all per-instance access handles (register map, frames, queues) */
	if ((mptsas_check_acc_handle(mpt->m_datap) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		/* config-space errors are cleared only in this branch */
		ddi_fm_acc_err_clear(mpt->m_config_handle,
		    DDI_FME_VER0);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* 2) all per-instance DMA handles */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* 3) the command's own data DMA handle, if it has one */
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* 4) extra SGL frame handles, if the command allocated any */
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
}
8689
8690 /*
8691 * These routines manipulate the queue of commands that
8692 * are waiting for their completion routines to be called.
8693 * The queue is usually in FIFO order but on an MP system
8694 * it's possible for the completion routines to get out
8695 * of order. If that's a problem you need to add a global
8696 * mutex around the code that calls the completion routine
8697 * in the interrupt handler.
8698 */
8699 static void
8700 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8701 {
8702 struct scsi_pkt *pkt = CMD2PKT(cmd);
8703
8704 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8705
8706 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8707 cmd->cmd_linkp = NULL;
8708 cmd->cmd_flags |= CFLAG_FINISHED;
8709 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8710
8711 mptsas_fma_check(mpt, cmd);
8712
8713 /*
8714 * only add scsi pkts that have completion routines to
8715 * the doneq. no intr cmds do not have callbacks.
8716 */
8717 if (pkt && (pkt->pkt_comp)) {
8718 *mpt->m_donetail = cmd;
8719 mpt->m_donetail = &cmd->cmd_linkp;
8720 mpt->m_doneq_len++;
8721 }
8722 }
8723
8724 static mptsas_cmd_t *
8725 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8726 {
8727 mptsas_cmd_t *cmd;
8728 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8729
8730 /* pop one off the done queue */
8731 if ((cmd = item->doneq) != NULL) {
8732 /* if the queue is now empty fix the tail pointer */
8733 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8734 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8735 item->donetail = &item->doneq;
8736 }
8737 cmd->cmd_linkp = NULL;
8738 item->len--;
8739 }
8740 return (cmd);
8741 }
8742
8743 static void
8744 mptsas_doneq_empty(mptsas_t *mpt)
8745 {
8746 if (mpt->m_doneq && !mpt->m_in_callback) {
8747 mptsas_cmd_t *cmd, *next;
8748 struct scsi_pkt *pkt;
8749
8750 mpt->m_in_callback = 1;
8751 cmd = mpt->m_doneq;
8752 mpt->m_doneq = NULL;
8753 mpt->m_donetail = &mpt->m_doneq;
8754 mpt->m_doneq_len = 0;
8755
8756 mutex_exit(&mpt->m_mutex);
8757 /*
8758 * run the completion routines of all the
8759 * completed commands
8760 */
8761 while (cmd != NULL) {
8762 next = cmd->cmd_linkp;
8763 cmd->cmd_linkp = NULL;
8764 /* run this command's completion routine */
8765 cmd->cmd_flags |= CFLAG_COMPLETED;
8766 pkt = CMD2PKT(cmd);
8767 mptsas_pkt_comp(pkt, cmd);
8768 cmd = next;
8769 }
8770 mutex_enter(&mpt->m_mutex);
8771 mpt->m_in_callback = 0;
8772 }
8773 }
8774
8775 /*
8776 * These routines manipulate the target's queue of pending requests
8777 */
8778 void
8779 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8780 {
8781 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8782 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8783 cmd->cmd_queued = TRUE;
8784 if (ptgt)
8785 ptgt->m_t_nwait++;
8786 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8787 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8788 mpt->m_waitqtail = &cmd->cmd_linkp;
8789 }
8790 mpt->m_waitq = cmd;
8791 } else {
8792 cmd->cmd_linkp = NULL;
8793 *(mpt->m_waitqtail) = cmd;
8794 mpt->m_waitqtail = &cmd->cmd_linkp;
8795 }
8796 }
8797
8798 static mptsas_cmd_t *
8799 mptsas_waitq_rm(mptsas_t *mpt)
8800 {
8801 mptsas_cmd_t *cmd;
8802 mptsas_target_t *ptgt;
8803 NDBG7(("mptsas_waitq_rm"));
8804
8805 MPTSAS_WAITQ_RM(mpt, cmd);
8806
8807 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8808 if (cmd) {
8809 ptgt = cmd->cmd_tgt_addr;
8810 if (ptgt) {
8811 ptgt->m_t_nwait--;
8812 ASSERT(ptgt->m_t_nwait >= 0);
8813 }
8814 }
8815 return (cmd);
8816 }
8817
8818 /*
8819 * remove specified cmd from the middle of the wait queue.
8820 */
8821 static void
8822 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8823 {
8824 mptsas_cmd_t *prevp = mpt->m_waitq;
8825 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8826
8827 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8828 (void *)mpt, (void *)cmd));
8829 if (ptgt) {
8830 ptgt->m_t_nwait--;
8831 ASSERT(ptgt->m_t_nwait >= 0);
8832 }
8833
8834 if (prevp == cmd) {
8835 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8836 mpt->m_waitqtail = &mpt->m_waitq;
8837
8838 cmd->cmd_linkp = NULL;
8839 cmd->cmd_queued = FALSE;
8840 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8841 (void *)mpt, (void *)cmd));
8842 return;
8843 }
8844
8845 while (prevp != NULL) {
8846 if (prevp->cmd_linkp == cmd) {
8847 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8848 mpt->m_waitqtail = &prevp->cmd_linkp;
8849
8850 cmd->cmd_linkp = NULL;
8851 cmd->cmd_queued = FALSE;
8852 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8853 (void *)mpt, (void *)cmd));
8854 return;
8855 }
8856 prevp = prevp->cmd_linkp;
8857 }
8858 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8859 }
8860
8861 static mptsas_cmd_t *
8862 mptsas_tx_waitq_rm(mptsas_t *mpt)
8863 {
8864 mptsas_cmd_t *cmd;
8865 NDBG7(("mptsas_tx_waitq_rm"));
8866
8867 MPTSAS_TX_WAITQ_RM(mpt, cmd);
8868
8869 NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
8870
8871 return (cmd);
8872 }
8873
8874 /*
8875 * remove specified cmd from the middle of the tx_waitq.
8876 */
8877 static void
8878 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8879 {
8880 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8881
8882 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8883 (void *)mpt, (void *)cmd));
8884
8885 if (prevp == cmd) {
8886 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8887 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8888
8889 cmd->cmd_linkp = NULL;
8890 cmd->cmd_queued = FALSE;
8891 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8892 (void *)mpt, (void *)cmd));
8893 return;
8894 }
8895
8896 while (prevp != NULL) {
8897 if (prevp->cmd_linkp == cmd) {
8898 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8899 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8900
8901 cmd->cmd_linkp = NULL;
8902 cmd->cmd_queued = FALSE;
8903 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8904 (void *)mpt, (void *)cmd));
8905 return;
8906 }
8907 prevp = prevp->cmd_linkp;
8908 }
8909 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8910 }
8911
8912 /*
8913 * device and bus reset handling
8914 *
8915 * Notes:
8916 * - RESET_ALL: reset the controller
8917 * - RESET_TARGET: reset the target specified in scsi_address
8918 */
8919 static int
8920 mptsas_scsi_reset(struct scsi_address *ap, int level)
8921 {
8922 mptsas_t *mpt = ADDR2MPT(ap);
8923 int rval;
8924 mptsas_tgt_private_t *tgt_private;
8925 mptsas_target_t *ptgt = NULL;
8926
8927 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8928 ptgt = tgt_private->t_private;
8929 if (ptgt == NULL) {
8930 return (FALSE);
8931 }
8932 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
8933 level));
8934
8935 mutex_enter(&mpt->m_mutex);
8936 /*
8937 * if we are not in panic set up a reset delay for this target
8938 */
8939 if (!ddi_in_panic()) {
8940 mptsas_setup_bus_reset_delay(mpt);
8941 } else {
8942 drv_usecwait(mpt->m_scsi_reset_delay * 1000);
8943 }
8944 rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
8945 mutex_exit(&mpt->m_mutex);
8946
8947 /*
8948 * The transport layer expect to only see TRUE and
8949 * FALSE. Therefore, we will adjust the return value
8950 * if mptsas_do_scsi_reset returns FAILED.
8951 */
8952 if (rval == FAILED)
8953 rval = FALSE;
8954 return (rval);
8955 }
8956
8957 static int
8958 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8959 {
8960 int rval = FALSE;
8961 uint8_t config, disk;
8962
8963 ASSERT(mutex_owned(&mpt->m_mutex));
8964
8965 if (mptsas_debug_resets) {
8966 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8967 devhdl);
8968 }
8969
8970 /*
8971 * Issue a Target Reset message to the target specified but not to a
8972 * disk making up a raid volume. Just look through the RAID config
8973 * Phys Disk list of DevHandles. If the target's DevHandle is in this
8974 * list, then don't reset this target.
8975 */
8976 for (config = 0; config < mpt->m_num_raid_configs; config++) {
8977 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
8978 if (devhdl == mpt->m_raidconfig[config].
8979 m_physdisk_devhdl[disk]) {
8980 return (TRUE);
8981 }
8982 }
8983 }
8984
8985 rval = mptsas_ioc_task_management(mpt,
8986 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
8987
8988 mptsas_doneq_empty(mpt);
8989 return (rval);
8990 }
8991
8992 static int
8993 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
8994 void (*callback)(caddr_t), caddr_t arg)
8995 {
8996 mptsas_t *mpt = ADDR2MPT(ap);
8997
8998 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
8999
9000 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
9001 &mpt->m_mutex, &mpt->m_reset_notify_listf));
9002 }
9003
9004 static int
9005 mptsas_get_name(struct scsi_device *sd, char *name, int len)
9006 {
9007 dev_info_t *lun_dip = NULL;
9008
9009 ASSERT(sd != NULL);
9010 ASSERT(name != NULL);
9011 lun_dip = sd->sd_dev;
9012 ASSERT(lun_dip != NULL);
9013
9014 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
9015 return (1);
9016 } else {
9017 return (0);
9018 }
9019 }
9020
/*
 * The bus address of an mptsas child is the same string produced by
 * mptsas_get_name(), so simply delegate to it.
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
9026
9027 void
9028 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
9029 {
9030
9031 NDBG25(("mptsas_set_throttle: throttle=%x", what));
9032
9033 /*
9034 * if the bus is draining/quiesced, no changes to the throttles
9035 * are allowed. Not allowing change of throttles during draining
9036 * limits error recovery but will reduce draining time
9037 *
9038 * all throttles should have been set to HOLD_THROTTLE
9039 */
9040 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
9041 return;
9042 }
9043
9044 if (what == HOLD_THROTTLE) {
9045 ptgt->m_t_throttle = HOLD_THROTTLE;
9046 } else if (ptgt->m_reset_delay == 0) {
9047 ptgt->m_t_throttle = what;
9048 }
9049 }
9050
9051 /*
9052 * Clean up from a device reset.
9053 * For the case of target reset, this function clears the waitq of all
9054 * commands for a particular target. For the case of abort task set, this
9055 * function clears the waitq of all commonds for a particular target/lun.
9056 */
9057 static void
9058 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
9059 {
9060 mptsas_slots_t *slots = mpt->m_active;
9061 mptsas_cmd_t *cmd, *next_cmd;
9062 int slot;
9063 uchar_t reason;
9064 uint_t stat;
9065 hrtime_t timestamp;
9066
9067 NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
9068
9069 timestamp = gethrtime();
9070
9071 /*
9072 * Make sure the I/O Controller has flushed all cmds
9073 * that are associated with this target for a target reset
9074 * and target/lun for abort task set.
9075 * Account for TM requests, which use the last SMID.
9076 */
9077 for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
9078 if ((cmd = slots->m_slot[slot]) == NULL)
9079 continue;
9080 reason = CMD_RESET;
9081 stat = STAT_DEV_RESET;
9082 switch (tasktype) {
9083 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
9084 if (Tgt(cmd) == target) {
9085 if (cmd->cmd_active_expiration <= timestamp) {
9086 /*
9087 * When timeout requested, propagate
9088 * proper reason and statistics to
9089 * target drivers.
9090 */
9091 reason = CMD_TIMEOUT;
9092 stat |= STAT_TIMEOUT;
9093 }
9094 NDBG25(("mptsas_flush_target discovered non-"
9095 "NULL cmd in slot %d, tasktype 0x%x", slot,
9096 tasktype));
9097 mptsas_dump_cmd(mpt, cmd);
9098 mptsas_remove_cmd(mpt, cmd);
9099 mptsas_set_pkt_reason(mpt, cmd, reason, stat);
9100 mptsas_doneq_add(mpt, cmd);
9101 }
9102 break;
9103 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9104 reason = CMD_ABORTED;
9105 stat = STAT_ABORTED;
9106 /*FALLTHROUGH*/
9107 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9108 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9109
9110 NDBG25(("mptsas_flush_target discovered non-"
9111 "NULL cmd in slot %d, tasktype 0x%x", slot,
9112 tasktype));
9113 mptsas_dump_cmd(mpt, cmd);
9114 mptsas_remove_cmd(mpt, cmd);
9115 mptsas_set_pkt_reason(mpt, cmd, reason,
9116 stat);
9117 mptsas_doneq_add(mpt, cmd);
9118 }
9119 break;
9120 default:
9121 break;
9122 }
9123 }
9124
9125 /*
9126 * Flush the waitq and tx_waitq of this target's cmds
9127 */
9128 cmd = mpt->m_waitq;
9129
9130 reason = CMD_RESET;
9131 stat = STAT_DEV_RESET;
9132
9133 switch (tasktype) {
9134 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
9135 while (cmd != NULL) {
9136 next_cmd = cmd->cmd_linkp;
9137 if (Tgt(cmd) == target) {
9138 mptsas_waitq_delete(mpt, cmd);
9139 mptsas_set_pkt_reason(mpt, cmd,
9140 reason, stat);
9141 mptsas_doneq_add(mpt, cmd);
9142 }
9143 cmd = next_cmd;
9144 }
9145 mutex_enter(&mpt->m_tx_waitq_mutex);
9146 cmd = mpt->m_tx_waitq;
9147 while (cmd != NULL) {
9148 next_cmd = cmd->cmd_linkp;
9149 if (Tgt(cmd) == target) {
9150 mptsas_tx_waitq_delete(mpt, cmd);
9151 mutex_exit(&mpt->m_tx_waitq_mutex);
9152 mptsas_set_pkt_reason(mpt, cmd,
9153 reason, stat);
9154 mptsas_doneq_add(mpt, cmd);
9155 mutex_enter(&mpt->m_tx_waitq_mutex);
9156 }
9157 cmd = next_cmd;
9158 }
9159 mutex_exit(&mpt->m_tx_waitq_mutex);
9160 break;
9161 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9162 reason = CMD_ABORTED;
9163 stat = STAT_ABORTED;
9164 /*FALLTHROUGH*/
9165 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9166 while (cmd != NULL) {
9167 next_cmd = cmd->cmd_linkp;
9168 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9169 mptsas_waitq_delete(mpt, cmd);
9170 mptsas_set_pkt_reason(mpt, cmd,
9171 reason, stat);
9172 mptsas_doneq_add(mpt, cmd);
9173 }
9174 cmd = next_cmd;
9175 }
9176 mutex_enter(&mpt->m_tx_waitq_mutex);
9177 cmd = mpt->m_tx_waitq;
9178 while (cmd != NULL) {
9179 next_cmd = cmd->cmd_linkp;
9180 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9181 mptsas_tx_waitq_delete(mpt, cmd);
9182 mutex_exit(&mpt->m_tx_waitq_mutex);
9183 mptsas_set_pkt_reason(mpt, cmd,
9184 reason, stat);
9185 mptsas_doneq_add(mpt, cmd);
9186 mutex_enter(&mpt->m_tx_waitq_mutex);
9187 }
9188 cmd = next_cmd;
9189 }
9190 mutex_exit(&mpt->m_tx_waitq_mutex);
9191 break;
9192 default:
9193 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
9194 tasktype);
9195 break;
9196 }
9197 }
9198
9199 /*
9200 * Clean up hba state, abort all outstanding command and commands in waitq
9201 * reset timeout of all targets.
9202 */
9203 static void
9204 mptsas_flush_hba(mptsas_t *mpt)
9205 {
9206 mptsas_slots_t *slots = mpt->m_active;
9207 mptsas_cmd_t *cmd;
9208 int slot;
9209
9210 NDBG25(("mptsas_flush_hba"));
9211
9212 /*
9213 * The I/O Controller should have already sent back
9214 * all commands via the scsi I/O reply frame. Make
9215 * sure all commands have been flushed.
9216 * Account for TM request, which use the last SMID.
9217 */
9218 for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
9219 if ((cmd = slots->m_slot[slot]) == NULL)
9220 continue;
9221
9222 if (cmd->cmd_flags & CFLAG_CMDIOC) {
9223 /*
9224 * Need to make sure to tell everyone that might be
9225 * waiting on this command that it's going to fail. If
9226 * we get here, this command will never timeout because
9227 * the active command table is going to be re-allocated,
9228 * so there will be nothing to check against a time out.
9229 * Instead, mark the command as failed due to reset.
9230 */
9231 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
9232 STAT_BUS_RESET);
9233 if ((cmd->cmd_flags &
9234 (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
9235 cmd->cmd_flags |= CFLAG_FINISHED;
9236 cv_broadcast(&mpt->m_passthru_cv);
9237 cv_broadcast(&mpt->m_config_cv);
9238 cv_broadcast(&mpt->m_fw_diag_cv);
9239 }
9240 continue;
9241 }
9242
9243 NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
9244 slot));
9245 mptsas_dump_cmd(mpt, cmd);
9246
9247 mptsas_remove_cmd(mpt, cmd);
9248 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9249 mptsas_doneq_add(mpt, cmd);
9250 }
9251
9252 /*
9253 * Flush the waitq.
9254 */
9255 while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
9256 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9257 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9258 (cmd->cmd_flags & CFLAG_CONFIG) ||
9259 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
9260 cmd->cmd_flags |= CFLAG_FINISHED;
9261 cv_broadcast(&mpt->m_passthru_cv);
9262 cv_broadcast(&mpt->m_config_cv);
9263 cv_broadcast(&mpt->m_fw_diag_cv);
9264 } else {
9265 mptsas_doneq_add(mpt, cmd);
9266 }
9267 }
9268
9269 /*
9270 * Flush the tx_waitq
9271 */
9272 mutex_enter(&mpt->m_tx_waitq_mutex);
9273 while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
9274 mutex_exit(&mpt->m_tx_waitq_mutex);
9275 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9276 mptsas_doneq_add(mpt, cmd);
9277 mutex_enter(&mpt->m_tx_waitq_mutex);
9278 }
9279 mutex_exit(&mpt->m_tx_waitq_mutex);
9280
9281 /*
9282 * Drain the taskqs prior to reallocating resources.
9283 */
9284 mutex_exit(&mpt->m_mutex);
9285 ddi_taskq_wait(mpt->m_event_taskq);
9286 ddi_taskq_wait(mpt->m_dr_taskq);
9287 mutex_enter(&mpt->m_mutex);
9288 }
9289
9290 /*
9291 * set pkt_reason and OR in pkt_statistics flag
9292 */
9293 static void
9294 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9295 uint_t stat)
9296 {
9297 #ifndef __lock_lint
9298 _NOTE(ARGUNUSED(mpt))
9299 #endif
9300
9301 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9302 (void *)cmd, reason, stat));
9303
9304 if (cmd) {
9305 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9306 cmd->cmd_pkt->pkt_reason = reason;
9307 }
9308 cmd->cmd_pkt->pkt_statistics |= stat;
9309 }
9310 }
9311
9312 static void
9313 mptsas_start_watch_reset_delay()
9314 {
9315 NDBG22(("mptsas_start_watch_reset_delay"));
9316
9317 mutex_enter(&mptsas_global_mutex);
9318 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
9319 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
9320 drv_usectohz((clock_t)
9321 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
9322 ASSERT(mptsas_reset_watch != NULL);
9323 }
9324 mutex_exit(&mptsas_global_mutex);
9325 }
9326
9327 static void
9328 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
9329 {
9330 mptsas_target_t *ptgt = NULL;
9331
9332 ASSERT(MUTEX_HELD(&mpt->m_mutex));
9333
9334 NDBG22(("mptsas_setup_bus_reset_delay"));
9335 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9336 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9337 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9338 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
9339 }
9340
9341 mptsas_start_watch_reset_delay();
9342 }
9343
9344 /*
9345 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9346 * mpt instance for active reset delays
9347 */
9348 static void
9349 mptsas_watch_reset_delay(void *arg)
9350 {
9351 #ifndef __lock_lint
9352 _NOTE(ARGUNUSED(arg))
9353 #endif
9354
9355 mptsas_t *mpt;
9356 int not_done = 0;
9357
9358 NDBG22(("mptsas_watch_reset_delay"));
9359
9360 mutex_enter(&mptsas_global_mutex);
9361 mptsas_reset_watch = 0;
9362 mutex_exit(&mptsas_global_mutex);
9363 rw_enter(&mptsas_global_rwlock, RW_READER);
9364 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9365 if (mpt->m_tran == 0) {
9366 continue;
9367 }
9368 mutex_enter(&mpt->m_mutex);
9369 not_done += mptsas_watch_reset_delay_subr(mpt);
9370 mutex_exit(&mpt->m_mutex);
9371 }
9372 rw_exit(&mptsas_global_rwlock);
9373
9374 if (not_done) {
9375 mptsas_start_watch_reset_delay();
9376 }
9377 }
9378
9379 static int
9380 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
9381 {
9382 int done = 0;
9383 int restart = 0;
9384 mptsas_target_t *ptgt = NULL;
9385
9386 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
9387
9388 ASSERT(mutex_owned(&mpt->m_mutex));
9389
9390 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9391 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9392 if (ptgt->m_reset_delay != 0) {
9393 ptgt->m_reset_delay -=
9394 MPTSAS_WATCH_RESET_DELAY_TICK;
9395 if (ptgt->m_reset_delay <= 0) {
9396 ptgt->m_reset_delay = 0;
9397 mptsas_set_throttle(mpt, ptgt,
9398 MAX_THROTTLE);
9399 restart++;
9400 } else {
9401 done = -1;
9402 }
9403 }
9404 }
9405
9406 if (restart > 0) {
9407 mptsas_restart_hba(mpt);
9408 }
9409 return (done);
9410 }
9411
9412 #ifdef MPTSAS_TEST
9413 static void
9414 mptsas_test_reset(mptsas_t *mpt, int target)
9415 {
9416 mptsas_target_t *ptgt = NULL;
9417
9418 if (mptsas_rtest == target) {
9419 if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
9420 mptsas_rtest = -1;
9421 }
9422 if (mptsas_rtest == -1) {
9423 NDBG22(("mptsas_test_reset success"));
9424 }
9425 }
9426 }
9427 #endif
9428
9429 /*
9430 * abort handling:
9431 *
9432 * Notes:
9433 * - if pkt is not NULL, abort just that command
9434 * - if pkt is NULL, abort all outstanding commands for target
9435 */
9436 static int
9437 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9438 {
9439 mptsas_t *mpt = ADDR2MPT(ap);
9440 int rval;
9441 mptsas_tgt_private_t *tgt_private;
9442 int target, lun;
9443
9444 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9445 tran_tgt_private;
9446 ASSERT(tgt_private != NULL);
9447 target = tgt_private->t_private->m_devhdl;
9448 lun = tgt_private->t_lun;
9449
9450 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9451
9452 mutex_enter(&mpt->m_mutex);
9453 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9454 mutex_exit(&mpt->m_mutex);
9455 return (rval);
9456 }
9457
9458 static int
9459 mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
9460 {
9461 mptsas_cmd_t *sp = NULL;
9462 mptsas_slots_t *slots = mpt->m_active;
9463 int rval = FALSE;
9464
9465 ASSERT(mutex_owned(&mpt->m_mutex));
9466
9467 /*
9468 * Abort the command pkt on the target/lun in ap. If pkt is
9469 * NULL, abort all outstanding commands on that target/lun.
9470 * If you can abort them, return 1, else return 0.
9471 * Each packet that's aborted should be sent back to the target
9472 * driver through the callback routine, with pkt_reason set to
9473 * CMD_ABORTED.
9474 *
9475 * abort cmd pkt on HBA hardware; clean out of outstanding
9476 * command lists, etc.
9477 */
9478 if (pkt != NULL) {
9479 /* abort the specified packet */
9480 sp = PKT2CMD(pkt);
9481
9482 if (sp->cmd_queued) {
9483 NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
9484 (void *)sp));
9485 mptsas_waitq_delete(mpt, sp);
9486 mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
9487 STAT_ABORTED);
9488 mptsas_doneq_add(mpt, sp);
9489 rval = TRUE;
9490 goto done;
9491 }
9492
9493 /*
9494 * Have mpt firmware abort this command
9495 */
9496
9497 if (slots->m_slot[sp->cmd_slot] != NULL) {
9498 rval = mptsas_ioc_task_management(mpt,
9499 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
9500 lun, NULL, 0, 0);
9501
9502 /*
9503 * The transport layer expects only TRUE and FALSE.
9504 * Therefore, if mptsas_ioc_task_management returns
9505 * FAILED we will return FALSE.
9506 */
9507 if (rval == FAILED)
9508 rval = FALSE;
9509 goto done;
9510 }
9511 }
9512
9513 /*
9514 * If pkt is NULL then abort task set
9515 */
9516 rval = mptsas_ioc_task_management(mpt,
9517 MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);
9518
9519 /*
9520 * The transport layer expects only TRUE and FALSE.
9521 * Therefore, if mptsas_ioc_task_management returns
9522 * FAILED we will return FALSE.
9523 */
9524 if (rval == FAILED)
9525 rval = FALSE;
9526
9527 #ifdef MPTSAS_TEST
9528 if (rval && mptsas_test_stop) {
9529 debug_enter("mptsas_do_scsi_abort");
9530 }
9531 #endif
9532
9533 done:
9534 mptsas_doneq_empty(mpt);
9535 return (rval);
9536 }
9537
9538 /*
9539 * capability handling:
9540 * (*tran_getcap). Get the capability named, and return its value.
9541 */
9542 static int
9543 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9544 {
9545 mptsas_t *mpt = ADDR2MPT(ap);
9546 int ckey;
9547 int rval = FALSE;
9548
9549 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9550 ap->a_target, cap, tgtonly));
9551
9552 mutex_enter(&mpt->m_mutex);
9553
9554 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9555 mutex_exit(&mpt->m_mutex);
9556 return (UNDEFINED);
9557 }
9558
9559 switch (ckey) {
9560 case SCSI_CAP_DMA_MAX:
9561 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9562 break;
9563 case SCSI_CAP_ARQ:
9564 rval = TRUE;
9565 break;
9566 case SCSI_CAP_MSG_OUT:
9567 case SCSI_CAP_PARITY:
9568 case SCSI_CAP_UNTAGGED_QING:
9569 rval = TRUE;
9570 break;
9571 case SCSI_CAP_TAGGED_QING:
9572 rval = TRUE;
9573 break;
9574 case SCSI_CAP_RESET_NOTIFICATION:
9575 rval = TRUE;
9576 break;
9577 case SCSI_CAP_LINKED_CMDS:
9578 rval = FALSE;
9579 break;
9580 case SCSI_CAP_QFULL_RETRIES:
9581 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9582 tran_tgt_private))->t_private->m_qfull_retries;
9583 break;
9584 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9585 rval = drv_hztousec(((mptsas_tgt_private_t *)
9586 (ap->a_hba_tran->tran_tgt_private))->
9587 t_private->m_qfull_retry_interval) / 1000;
9588 break;
9589 case SCSI_CAP_CDB_LEN:
9590 rval = CDB_GROUP4;
9591 break;
9592 case SCSI_CAP_INTERCONNECT_TYPE:
9593 rval = INTERCONNECT_SAS;
9594 break;
9595 case SCSI_CAP_TRAN_LAYER_RETRIES:
9596 if (mpt->m_ioc_capabilities &
9597 MPI2_IOCFACTS_CAPABILITY_TLR)
9598 rval = TRUE;
9599 else
9600 rval = FALSE;
9601 break;
9602 default:
9603 rval = UNDEFINED;
9604 break;
9605 }
9606
9607 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9608
9609 mutex_exit(&mpt->m_mutex);
9610 return (rval);
9611 }
9612
9613 /*
9614 * (*tran_setcap). Set the capability named to the value given.
9615 */
9616 static int
9617 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9618 {
9619 mptsas_t *mpt = ADDR2MPT(ap);
9620 int ckey;
9621 int rval = FALSE;
9622
9623 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9624 ap->a_target, cap, value, tgtonly));
9625
9626 if (!tgtonly) {
9627 return (rval);
9628 }
9629
9630 mutex_enter(&mpt->m_mutex);
9631
9632 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9633 mutex_exit(&mpt->m_mutex);
9634 return (UNDEFINED);
9635 }
9636
9637 switch (ckey) {
9638 case SCSI_CAP_DMA_MAX:
9639 case SCSI_CAP_MSG_OUT:
9640 case SCSI_CAP_PARITY:
9641 case SCSI_CAP_INITIATOR_ID:
9642 case SCSI_CAP_LINKED_CMDS:
9643 case SCSI_CAP_UNTAGGED_QING:
9644 case SCSI_CAP_RESET_NOTIFICATION:
9645 /*
9646 * None of these are settable via
9647 * the capability interface.
9648 */
9649 break;
9650 case SCSI_CAP_ARQ:
9651 /*
9652 * We cannot turn off arq so return false if asked to
9653 */
9654 if (value) {
9655 rval = TRUE;
9656 } else {
9657 rval = FALSE;
9658 }
9659 break;
9660 case SCSI_CAP_TAGGED_QING:
9661 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9662 (ap->a_hba_tran->tran_tgt_private))->t_private,
9663 MAX_THROTTLE);
9664 rval = TRUE;
9665 break;
9666 case SCSI_CAP_QFULL_RETRIES:
9667 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9668 t_private->m_qfull_retries = (uchar_t)value;
9669 rval = TRUE;
9670 break;
9671 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9672 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9673 t_private->m_qfull_retry_interval =
9674 drv_usectohz(value * 1000);
9675 rval = TRUE;
9676 break;
9677 default:
9678 rval = UNDEFINED;
9679 break;
9680 }
9681 mutex_exit(&mpt->m_mutex);
9682 return (rval);
9683 }
9684
9685 /*
9686 * Utility routine for mptsas_ifsetcap/ifgetcap
9687 */
9688 /*ARGSUSED*/
9689 static int
9690 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9691 {
9692 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9693
9694 if (!cap)
9695 return (FALSE);
9696
9697 *cidxp = scsi_hba_lookup_capstr(cap);
9698 return (TRUE);
9699 }
9700
9701 static int
9702 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9703 {
9704 mptsas_slots_t *old_active = mpt->m_active;
9705 mptsas_slots_t *new_active;
9706 size_t size;
9707
9708 /*
9709 * if there are active commands, then we cannot
9710 * change size of active slots array.
9711 */
9712 ASSERT(mpt->m_ncmds == 0);
9713
9714 size = MPTSAS_SLOTS_SIZE(mpt);
9715 new_active = kmem_zalloc(size, flag);
9716 if (new_active == NULL) {
9717 NDBG1(("new active alloc failed"));
9718 return (-1);
9719 }
9720 /*
9721 * Since SMID 0 is reserved and the TM slot is reserved, the
9722 * number of slots that can be used at any one time is
9723 * m_max_requests - 2.
9724 */
9725 new_active->m_n_normal = (mpt->m_max_requests - 2);
9726 new_active->m_size = size;
9727 new_active->m_rotor = 1;
9728 if (old_active)
9729 mptsas_free_active_slots(mpt);
9730 mpt->m_active = new_active;
9731
9732 return (0);
9733 }
9734
9735 static void
9736 mptsas_free_active_slots(mptsas_t *mpt)
9737 {
9738 mptsas_slots_t *active = mpt->m_active;
9739 size_t size;
9740
9741 if (active == NULL)
9742 return;
9743 size = active->m_size;
9744 kmem_free(active, size);
9745 mpt->m_active = NULL;
9746 }
9747
9748 /*
9749 * Error logging, printing, and debug print routines.
9750 */
static char *mptsas_label = "mpt_sas";	/* module label passed to scsi_log() */
9752
/*PRINTFLIKE3*/
void
mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
{
	dev_info_t	*dev;
	va_list	ap;

	/* Attribute the message to the instance's devinfo when known. */
	if (mpt) {
		dev = mpt->m_dip;
	} else {
		dev = 0;
	}

	mutex_enter(&mptsas_log_mutex);

	/*
	 * Format into the shared log buffer under mptsas_log_mutex.
	 * NOTE(review): vsprintf() is unbounded; this assumes all
	 * callers stay within mptsas_log_buf — confirm its size where
	 * it is declared.
	 */
	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

	/* CE_CONT messages get an explicit trailing newline. */
	if (level == CE_CONT) {
		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
	} else {
		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
	}

	mutex_exit(&mptsas_log_mutex);
}
9780
9781 #ifdef MPTSAS_DEBUG
9782 /*
9783 * Use a circular buffer to log messages to private memory.
9784 * Increment idx atomically to minimize risk to miss lines.
9785 * It's fast and does not hold up the proceedings too much.
9786 */
9787 static const size_t mptsas_dbglog_linecnt = MPTSAS_DBGLOG_LINECNT;
9788 static const size_t mptsas_dbglog_linelen = MPTSAS_DBGLOG_LINELEN;
9789 static char mptsas_dbglog_bufs[MPTSAS_DBGLOG_LINECNT][MPTSAS_DBGLOG_LINELEN];
9790 static uint32_t mptsas_dbglog_idx = 0;
9791
/*PRINTFLIKE1*/
void
mptsas_debug_log(char *fmt, ...)
{
	va_list	ap;
	uint32_t idx;

	/*
	 * Claim the next line in the circular buffer atomically.
	 * The masking below requires MPTSAS_DBGLOG_LINECNT to be a
	 * power of two.
	 */
	idx = atomic_inc_32_nv(&mptsas_dbglog_idx) &
	    (mptsas_dbglog_linecnt - 1);

	/* Format into the claimed line, truncated to the line length. */
	va_start(ap, fmt);
	(void) vsnprintf(mptsas_dbglog_bufs[idx],
	    mptsas_dbglog_linelen, fmt, ap);
	va_end(ap);
}
9807
/*PRINTFLIKE1*/
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t *dev = 0;
	va_list	ap;

	mutex_enter(&mptsas_log_mutex);

	/*
	 * Format into the shared log buffer under mptsas_log_mutex.
	 * NOTE(review): vsprintf() is unbounded; assumes the message
	 * fits in mptsas_log_buf — confirm the buffer size.
	 */
	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

	/* Route to the PROM console when built with PROM_PRINTF. */
#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
9828 #endif
9829
9830 /*
9831 * timeout handling
9832 */
9833 static void
9834 mptsas_watch(void *arg)
9835 {
9836 #ifndef __lock_lint
9837 _NOTE(ARGUNUSED(arg))
9838 #endif
9839
9840 mptsas_t *mpt;
9841 uint32_t doorbell;
9842
9843 NDBG30(("mptsas_watch"));
9844
9845 rw_enter(&mptsas_global_rwlock, RW_READER);
9846 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9847
9848 mutex_enter(&mpt->m_mutex);
9849
9850 /* Skip device if not powered on */
9851 if (mpt->m_options & MPTSAS_OPT_PM) {
9852 if (mpt->m_power_level == PM_LEVEL_D0) {
9853 (void) pm_busy_component(mpt->m_dip, 0);
9854 mpt->m_busy = 1;
9855 } else {
9856 mutex_exit(&mpt->m_mutex);
9857 continue;
9858 }
9859 }
9860
9861 /*
9862 * Check if controller is in a FAULT state. If so, reset it.
9863 */
9864 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9865 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9866 doorbell &= MPI2_DOORBELL_DATA_MASK;
9867 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9868 "code: %04x", doorbell);
9869 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9870 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9871 mptsas_log(mpt, CE_WARN, "Reset failed"
9872 "after fault was detected");
9873 }
9874 }
9875
9876 /*
9877 * For now, always call mptsas_watchsubr.
9878 */
9879 mptsas_watchsubr(mpt);
9880
9881 if (mpt->m_options & MPTSAS_OPT_PM) {
9882 mpt->m_busy = 0;
9883 (void) pm_idle_component(mpt->m_dip, 0);
9884 }
9885
9886 mutex_exit(&mpt->m_mutex);
9887 }
9888 rw_exit(&mptsas_global_rwlock);
9889
9890 mutex_enter(&mptsas_global_mutex);
9891 if (mptsas_timeouts_enabled)
9892 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9893 mutex_exit(&mptsas_global_mutex);
9894 }
9895
/*
 * Per-instance watchdog body: scan the active slot table for stuck
 * commands and each target's pending-command queue for expired
 * timeouts, throttling or resetting targets as needed.
 * Called from mptsas_watch() with m_mutex held.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;
	hrtime_t	timestamp = gethrtime();

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if (cmd->cmd_active_expiration <= timestamp) {
				if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				} else if (cmd->cmd_flags &
				    (CFLAG_PASSTHRU | CFLAG_CONFIG |
				    CFLAG_FW_DIAG)) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					/*
					 * Wake every waiter; each checks its
					 * own cmd_flags for completion.
					 */
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		/* The queue tail holds the latest expiration time. */
		cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
		if (cmd == NULL)
			continue;

		if (cmd->cmd_active_expiration <= timestamp) {
			/*
			 * Earliest command timeout expired. Drain throttle.
			 */
			mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

			/*
			 * Check for remaining commands.
			 */
			cmd = TAILQ_FIRST(&ptgt->m_active_cmdq);
			if (cmd->cmd_active_expiration > timestamp) {
				/*
				 * Wait for remaining commands to complete or
				 * time out.
				 */
				NDBG23(("command timed out, pending drain"));
				continue;
			}

			/*
			 * All command timeouts expired.
			 */
			mptsas_log(mpt, CE_NOTE, "Timeout of %d seconds "
			    "expired with %d commands on target %d lun %d.",
			    cmd->cmd_pkt->pkt_time, ptgt->m_t_ncmds,
			    ptgt->m_devhdl, Lun(cmd));

			mptsas_cmd_timeout(mpt, ptgt);
		} else if (cmd->cmd_active_expiration <=
		    timestamp + (hrtime_t)mptsas_scsi_watchdog_tick * NANOSEC) {
			/* Expiration falls within the next watchdog tick. */
			NDBG23(("pending timeout"));
			mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
		}
	}
}
9997
9998 /*
9999 * timeout recovery
10000 */
10001 static void
10002 mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt)
10003 {
10004 uint16_t devhdl;
10005 uint64_t sas_wwn;
10006 uint8_t phy;
10007 char wwn_str[MPTSAS_WWN_STRLEN];
10008
10009 devhdl = ptgt->m_devhdl;
10010 sas_wwn = ptgt->m_addr.mta_wwn;
10011 phy = ptgt->m_phynum;
10012 if (sas_wwn == 0) {
10013 (void) sprintf(wwn_str, "p%x", phy);
10014 } else {
10015 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
10016 }
10017
10018 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
10019 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
10020 "target %d %s, enclosure %u", devhdl, wwn_str,
10021 ptgt->m_enclosure);
10022
10023 /*
10024 * Abort all outstanding commands on the device.
10025 */
10026 NDBG29(("mptsas_cmd_timeout: device reset"));
10027 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
10028 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
10029 "recovery failed!", devhdl);
10030 }
10031 }
10032
10033 /*
10034 * Device / Hotplug control
10035 */
10036 static int
10037 mptsas_scsi_quiesce(dev_info_t *dip)
10038 {
10039 mptsas_t *mpt;
10040 scsi_hba_tran_t *tran;
10041
10042 tran = ddi_get_driver_private(dip);
10043 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10044 return (-1);
10045
10046 return (mptsas_quiesce_bus(mpt));
10047 }
10048
10049 static int
10050 mptsas_scsi_unquiesce(dev_info_t *dip)
10051 {
10052 mptsas_t *mpt;
10053 scsi_hba_tran_t *tran;
10054
10055 tran = ddi_get_driver_private(dip);
10056 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10057 return (-1);
10058
10059 return (mptsas_unquiesce_bus(mpt));
10060 }
10061
/*
 * Quiesce the bus: hold every target's throttle, then wait (with a
 * periodic drain-check timeout) until all outstanding commands have
 * completed. Returns 0 when quiesced, -1 if interrupted by a signal.
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		/* Periodically re-check whether the queue has drained. */
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		/* cv_wait_sig() returns 0 when interrupted by a signal. */
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Re-open throttles and restart held commands. */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}
			mptsas_restart_hba(mpt);
			/*
			 * Cancel the drain-check callback; untimeout()
			 * must be called without holding m_mutex.
			 */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
10114
10115 static int
10116 mptsas_unquiesce_bus(mptsas_t *mpt)
10117 {
10118 mptsas_target_t *ptgt = NULL;
10119
10120 NDBG28(("mptsas_unquiesce_bus"));
10121 mutex_enter(&mpt->m_mutex);
10122 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10123 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10124 ptgt = refhash_next(mpt->m_targets, ptgt)) {
10125 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10126 }
10127 mptsas_restart_hba(mpt);
10128 mutex_exit(&mpt->m_mutex);
10129 return (0);
10130 }
10131
10132 static void
10133 mptsas_ncmds_checkdrain(void *arg)
10134 {
10135 mptsas_t *mpt = arg;
10136 mptsas_target_t *ptgt = NULL;
10137
10138 mutex_enter(&mpt->m_mutex);
10139 if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
10140 mpt->m_quiesce_timeid = 0;
10141 if (mpt->m_ncmds == 0) {
10142 /* Command queue has been drained */
10143 cv_signal(&mpt->m_cv);
10144 } else {
10145 /*
10146 * The throttle may have been reset because
10147 * of a SCSI bus reset
10148 */
10149 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10150 ptgt = refhash_next(mpt->m_targets, ptgt)) {
10151 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
10152 }
10153
10154 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
10155 mpt, (MPTSAS_QUIESCE_TIMEOUT *
10156 drv_usectohz(1000000)));
10157 }
10158 }
10159 mutex_exit(&mpt->m_mutex);
10160 }
10161
10162 /*ARGSUSED*/
10163 static void
10164 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10165 {
10166 int i;
10167 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10168 char buf[128];
10169
10170 buf[0] = '\0';
10171 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10172 Tgt(cmd), Lun(cmd)));
10173 (void) sprintf(&buf[0], "\tcdb=[");
10174 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10175 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10176 }
10177 (void) sprintf(&buf[strlen(buf)], " ]");
10178 NDBG25(("?%s\n", buf));
10179 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10180 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10181 cmd->cmd_pkt->pkt_state));
10182 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10183 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10184 }
10185
10186 static void
10187 mptsas_passthru_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10188 pMpi2SGESimple64_t sgep)
10189 {
10190 uint32_t sge_flags;
10191 uint32_t data_size, dataout_size;
10192 ddi_dma_cookie_t data_cookie;
10193 ddi_dma_cookie_t dataout_cookie;
10194
10195 data_size = pt->data_size;
10196 dataout_size = pt->dataout_size;
10197 data_cookie = pt->data_cookie;
10198 dataout_cookie = pt->dataout_cookie;
10199
10200 if (dataout_size) {
10201 sge_flags = dataout_size |
10202 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10203 MPI2_SGE_FLAGS_END_OF_BUFFER |
10204 MPI2_SGE_FLAGS_HOST_TO_IOC |
10205 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10206 MPI2_SGE_FLAGS_SHIFT);
10207 ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
10208 ddi_put32(acc_hdl, &sgep->Address.Low,
10209 (uint32_t)(dataout_cookie.dmac_laddress &
10210 0xffffffffull));
10211 ddi_put32(acc_hdl, &sgep->Address.High,
10212 (uint32_t)(dataout_cookie.dmac_laddress
10213 >> 32));
10214 sgep++;
10215 }
10216 sge_flags = data_size;
10217 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10218 MPI2_SGE_FLAGS_LAST_ELEMENT |
10219 MPI2_SGE_FLAGS_END_OF_BUFFER |
10220 MPI2_SGE_FLAGS_END_OF_LIST |
10221 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10222 MPI2_SGE_FLAGS_SHIFT);
10223 if (pt->direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10224 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
10225 MPI2_SGE_FLAGS_SHIFT);
10226 } else {
10227 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
10228 MPI2_SGE_FLAGS_SHIFT);
10229 }
10230 ddi_put32(acc_hdl, &sgep->FlagsLength,
10231 sge_flags);
10232 ddi_put32(acc_hdl, &sgep->Address.Low,
10233 (uint32_t)(data_cookie.dmac_laddress &
10234 0xffffffffull));
10235 ddi_put32(acc_hdl, &sgep->Address.High,
10236 (uint32_t)(data_cookie.dmac_laddress >> 32));
10237 }
10238
10239 static void
10240 mptsas_passthru_ieee_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10241 pMpi2IeeeSgeSimple64_t ieeesgep)
10242 {
10243 uint8_t sge_flags;
10244 uint32_t data_size, dataout_size;
10245 ddi_dma_cookie_t data_cookie;
10246 ddi_dma_cookie_t dataout_cookie;
10247
10248 data_size = pt->data_size;
10249 dataout_size = pt->dataout_size;
10250 data_cookie = pt->data_cookie;
10251 dataout_cookie = pt->dataout_cookie;
10252
10253 sge_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
10254 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
10255 if (dataout_size) {
10256 ddi_put32(acc_hdl, &ieeesgep->Length, dataout_size);
10257 ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10258 (uint32_t)(dataout_cookie.dmac_laddress &
10259 0xffffffffull));
10260 ddi_put32(acc_hdl, &ieeesgep->Address.High,
10261 (uint32_t)(dataout_cookie.dmac_laddress >> 32));
10262 ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10263 ieeesgep++;
10264 }
10265 sge_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
10266 ddi_put32(acc_hdl, &ieeesgep->Length, data_size);
10267 ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10268 (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
10269 ddi_put32(acc_hdl, &ieeesgep->Address.High,
10270 (uint32_t)(data_cookie.dmac_laddress >> 32));
10271 ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10272 }
10273
10274 static void
10275 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
10276 {
10277 caddr_t memp;
10278 pMPI2RequestHeader_t request_hdrp;
10279 struct scsi_pkt *pkt = cmd->cmd_pkt;
10280 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
10281 uint32_t request_size;
10282 uint32_t i;
10283 uint64_t request_desc = 0;
10284 uint8_t desc_type;
10285 uint16_t SMID;
10286 uint8_t *request, function;
10287 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
10288 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
10289
10290 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10291
10292 request = pt->request;
10293 request_size = pt->request_size;
10294
10295 SMID = cmd->cmd_slot;
10296
10297 /*
10298 * Store the passthrough message in memory location
10299 * corresponding to our slot number
10300 */
10301 memp = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
10302 request_hdrp = (pMPI2RequestHeader_t)memp;
10303 bzero(memp, mpt->m_req_frame_size);
10304
10305 for (i = 0; i < request_size; i++) {
10306 bcopy(request + i, memp + i, 1);
10307 }
10308
10309 NDBG15(("mptsas_start_passthru: Func 0x%x, MsgFlags 0x%x, "
10310 "size=%d, in %d, out %d, SMID %d", request_hdrp->Function,
10311 request_hdrp->MsgFlags, request_size,
10312 pt->data_size, pt->dataout_size, SMID));
10313
10314 /*
10315 * Add an SGE, even if the length is zero.
10316 */
10317 if (mpt->m_MPI25 && pt->simple == 0) {
10318 mptsas_passthru_ieee_sge(acc_hdl, pt,
10319 (pMpi2IeeeSgeSimple64_t)
10320 ((uint8_t *)request_hdrp + pt->sgl_offset));
10321 } else {
10322 mptsas_passthru_sge(acc_hdl, pt,
10323 (pMpi2SGESimple64_t)
10324 ((uint8_t *)request_hdrp + pt->sgl_offset));
10325 }
10326
10327 function = request_hdrp->Function;
10328 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10329 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10330 pMpi2SCSIIORequest_t scsi_io_req;
10331 caddr_t arsbuf;
10332 uint8_t ars_size;
10333 uint32_t ars_dmaaddrlow;
10334
10335 NDBG15(("mptsas_start_passthru: Is SCSI IO Req"));
10336 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
10337
10338 if (cmd->cmd_extrqslen != 0) {
10339 /*
10340 * Mapping of the buffer was done in
10341 * mptsas_do_passthru().
10342 * Calculate the DMA address with the same offset.
10343 */
10344 arsbuf = cmd->cmd_arq_buf;
10345 ars_size = cmd->cmd_extrqslen;
10346 ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10347 ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
10348 0xffffffffu;
10349 } else {
10350 arsbuf = mpt->m_req_sense +
10351 (mpt->m_req_sense_size * (SMID-1));
10352 cmd->cmd_arq_buf = arsbuf;
10353 ars_size = mpt->m_req_sense_size;
10354 ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10355 (mpt->m_req_sense_size * (SMID-1))) &
10356 0xffffffffu;
10357 }
10358 bzero(arsbuf, ars_size);
10359
10360 ddi_put8(acc_hdl, &scsi_io_req->SenseBufferLength, ars_size);
10361 ddi_put32(acc_hdl, &scsi_io_req->SenseBufferLowAddress,
10362 ars_dmaaddrlow);
10363
10364 /*
10365 * Put SGE for data and data_out buffer at the end of
10366 * scsi_io_request message header.(64 bytes in total)
10367 * Set SGLOffset0 value
10368 */
10369 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
10370 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
10371
10372 /*
10373 * Setup descriptor info. RAID passthrough must use the
10374 * default request descriptor which is already set, so if this
10375 * is a SCSI IO request, change the descriptor to SCSI IO.
10376 */
10377 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
10378 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
10379 request_desc = ((uint64_t)ddi_get16(acc_hdl,
10380 &scsi_io_req->DevHandle) << 48);
10381 }
10382 (void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
10383 DDI_DMA_SYNC_FORDEV);
10384 }
10385
10386 /*
10387 * We must wait till the message has been completed before
10388 * beginning the next message so we wait for this one to
10389 * finish.
10390 */
10391 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
10392 request_desc |= (SMID << 16) + desc_type;
10393 cmd->cmd_rfm = NULL;
10394 MPTSAS_START_CMD(mpt, request_desc);
10395 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
10396 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
10397 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10398 }
10399 }
10400
/*
 * Per-MPI-function fixup routines for user pass-through requests.
 * Each one validates the request/reply sizes for its function and
 * computes pt->sgl_offset, where the SGE(s) will be placed before
 * the request is started.
 */
typedef void (mptsas_pre_f)(mptsas_t *, mptsas_pt_request_t *);
static mptsas_pre_f	mpi_pre_ioc_facts;
static mptsas_pre_f	mpi_pre_port_facts;
static mptsas_pre_f	mpi_pre_fw_download;
static mptsas_pre_f	mpi_pre_fw_25_download;
static mptsas_pre_f	mpi_pre_fw_upload;
static mptsas_pre_f	mpi_pre_fw_25_upload;
static mptsas_pre_f	mpi_pre_sata_passthrough;
static mptsas_pre_f	mpi_pre_smp_passthrough;
static mptsas_pre_f	mpi_pre_config;
static mptsas_pre_f	mpi_pre_sas_io_unit_control;
static mptsas_pre_f	mpi_pre_scsi_io_req;
10413
10414 /*
10415 * Prepare the pt for a SAS2 FW_DOWNLOAD request.
10416 */
10417 static void
10418 mpi_pre_fw_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10419 {
10420 pMpi2FWDownloadTCSGE_t tcsge;
10421 pMpi2FWDownloadRequest req;
10422
10423 /*
10424 * If SAS3, call separate function.
10425 */
10426 if (mpt->m_MPI25) {
10427 mpi_pre_fw_25_download(mpt, pt);
10428 return;
10429 }
10430
10431 /*
10432 * User requests should come in with the Transaction
10433 * context element where the SGL will go. Putting the
10434 * SGL after that seems to work, but don't really know
10435 * why. Other drivers tend to create an extra SGL and
10436 * refer to the TCE through that.
10437 */
10438 req = (pMpi2FWDownloadRequest)pt->request;
10439 tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
10440 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10441 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10442 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10443 }
10444
10445 pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
10446 sizeof (*tcsge);
10447 if (pt->request_size != pt->sgl_offset)
10448 NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
10449 "0x%x, should be 0x%x, dataoutsz 0x%x",
10450 (int)pt->request_size, (int)pt->sgl_offset,
10451 (int)pt->dataout_size));
10452 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
10453 NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
10454 "0x%x, should be 0x%x", pt->data_size,
10455 (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10456 }
10457
10458 /*
10459 * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10460 */
10461 static void
10462 mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10463 {
10464 pMpi2FWDownloadTCSGE_t tcsge;
10465 pMpi2FWDownloadRequest req2;
10466 pMpi25FWDownloadRequest req25;
10467
10468 /*
10469 * User requests should come in with the Transaction
10470 * context element where the SGL will go. The new firmware
10471 * Doesn't use TCE and has space in the main request for
10472 * this information. So move to the right place.
10473 */
10474 req2 = (pMpi2FWDownloadRequest)pt->request;
10475 req25 = (pMpi25FWDownloadRequest)pt->request;
10476 tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10477 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10478 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10479 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10480 }
10481 req25->ImageOffset = tcsge->ImageOffset;
10482 req25->ImageSize = tcsge->ImageSize;
10483
10484 pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10485 if (pt->request_size != pt->sgl_offset)
10486 NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10487 "0x%x, should be 0x%x, dataoutsz 0x%x",
10488 pt->request_size, pt->sgl_offset,
10489 pt->dataout_size));
10490 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
10491 NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10492 "0x%x, should be 0x%x", pt->data_size,
10493 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10494 }
10495
10496 /*
10497 * Prepare the pt for a SAS2 FW_UPLOAD request.
10498 */
10499 static void
10500 mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10501 {
10502 pMpi2FWUploadTCSGE_t tcsge;
10503 pMpi2FWUploadRequest_t req;
10504
10505 /*
10506 * If SAS3, call separate function.
10507 */
10508 if (mpt->m_MPI25) {
10509 mpi_pre_fw_25_upload(mpt, pt);
10510 return;
10511 }
10512
10513 /*
10514 * User requests should come in with the Transaction
10515 * context element where the SGL will go. Putting the
10516 * SGL after that seems to work, but don't really know
10517 * why. Other drivers tend to create an extra SGL and
10518 * refer to the TCE through that.
10519 */
10520 req = (pMpi2FWUploadRequest_t)pt->request;
10521 tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
10522 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10523 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10524 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10525 }
10526
10527 pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
10528 sizeof (*tcsge);
10529 if (pt->request_size != pt->sgl_offset)
10530 NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
10531 "0x%x, should be 0x%x, dataoutsz 0x%x",
10532 pt->request_size, pt->sgl_offset,
10533 pt->dataout_size));
10534 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
10535 NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
10536 "0x%x, should be 0x%x", pt->data_size,
10537 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10538 }
10539
10540 /*
10541 * Prepare the pt a SAS3 FW_UPLOAD request.
10542 */
10543 static void
10544 mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10545 {
10546 pMpi2FWUploadTCSGE_t tcsge;
10547 pMpi2FWUploadRequest_t req2;
10548 pMpi25FWUploadRequest_t req25;
10549
10550 /*
10551 * User requests should come in with the Transaction
10552 * context element where the SGL will go. The new firmware
10553 * Doesn't use TCE and has space in the main request for
10554 * this information. So move to the right place.
10555 */
10556 req2 = (pMpi2FWUploadRequest_t)pt->request;
10557 req25 = (pMpi25FWUploadRequest_t)pt->request;
10558 tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
10559 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10560 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10561 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10562 }
10563 req25->ImageOffset = tcsge->ImageOffset;
10564 req25->ImageSize = tcsge->ImageSize;
10565
10566 pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
10567 if (pt->request_size != pt->sgl_offset)
10568 NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
10569 "0x%x, should be 0x%x, dataoutsz 0x%x",
10570 pt->request_size, pt->sgl_offset,
10571 pt->dataout_size));
10572 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
10573 NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
10574 "0x%x, should be 0x%x", pt->data_size,
10575 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10576 }
10577
10578 /*
10579 * Prepare the pt for an IOC_FACTS request.
10580 */
10581 static void
10582 mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10583 {
10584 #ifndef __lock_lint
10585 _NOTE(ARGUNUSED(mpt))
10586 #endif
10587 if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST))
10588 NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
10589 "0x%x, should be 0x%x, dataoutsz 0x%x",
10590 pt->request_size,
10591 (int)sizeof (MPI2_IOC_FACTS_REQUEST),
10592 pt->dataout_size));
10593 if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY))
10594 NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
10595 "0x%x, should be 0x%x", pt->data_size,
10596 (int)sizeof (MPI2_IOC_FACTS_REPLY)));
10597 pt->sgl_offset = (uint16_t)pt->request_size;
10598 }
10599
10600 /*
10601 * Prepare the pt for a PORT_FACTS request.
10602 */
10603 static void
10604 mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10605 {
10606 #ifndef __lock_lint
10607 _NOTE(ARGUNUSED(mpt))
10608 #endif
10609 if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST))
10610 NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
10611 "0x%x, should be 0x%x, dataoutsz 0x%x",
10612 pt->request_size,
10613 (int)sizeof (MPI2_PORT_FACTS_REQUEST),
10614 pt->dataout_size));
10615 if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY))
10616 NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
10617 "0x%x, should be 0x%x", pt->data_size,
10618 (int)sizeof (MPI2_PORT_FACTS_REPLY)));
10619 pt->sgl_offset = (uint16_t)pt->request_size;
10620 }
10621
10622 /*
10623 * Prepare pt for a SATA_PASSTHROUGH request.
10624 */
10625 static void
10626 mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10627 {
10628 #ifndef __lock_lint
10629 _NOTE(ARGUNUSED(mpt))
10630 #endif
10631 pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
10632 if (pt->request_size != pt->sgl_offset)
10633 NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
10634 "0x%x, should be 0x%x, dataoutsz 0x%x",
10635 pt->request_size, pt->sgl_offset,
10636 pt->dataout_size));
10637 if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY))
10638 NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
10639 "0x%x, should be 0x%x", pt->data_size,
10640 (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
10641 }
10642
10643 static void
10644 mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10645 {
10646 #ifndef __lock_lint
10647 _NOTE(ARGUNUSED(mpt))
10648 #endif
10649 pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
10650 if (pt->request_size != pt->sgl_offset)
10651 NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
10652 "0x%x, should be 0x%x, dataoutsz 0x%x",
10653 pt->request_size, pt->sgl_offset,
10654 pt->dataout_size));
10655 if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY))
10656 NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
10657 "0x%x, should be 0x%x", pt->data_size,
10658 (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
10659 }
10660
10661 /*
10662 * Prepare pt for a CONFIG request.
10663 */
10664 static void
10665 mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
10666 {
10667 #ifndef __lock_lint
10668 _NOTE(ARGUNUSED(mpt))
10669 #endif
10670 pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
10671 if (pt->request_size != pt->sgl_offset)
10672 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10673 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10674 pt->sgl_offset, pt->dataout_size));
10675 if (pt->data_size != sizeof (MPI2_CONFIG_REPLY))
10676 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10677 "should be 0x%x", pt->data_size,
10678 (int)sizeof (MPI2_CONFIG_REPLY)));
10679 pt->simple = 1;
10680 }
10681
10682 /*
10683 * Prepare pt for a SCSI_IO_REQ request.
10684 */
10685 static void
10686 mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
10687 {
10688 #ifndef __lock_lint
10689 _NOTE(ARGUNUSED(mpt))
10690 #endif
10691 pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
10692 if (pt->request_size != pt->sgl_offset)
10693 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10694 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10695 pt->sgl_offset,
10696 pt->dataout_size));
10697 if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY))
10698 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10699 "should be 0x%x", pt->data_size,
10700 (int)sizeof (MPI2_SCSI_IO_REPLY)));
10701 }
10702
10703 /*
10704 * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
10705 */
static void
mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(mpt))
#endif
	/*
	 * SAS_IO_UNIT_CONTROL requests carry no SGL, so the "SGL offset"
	 * is simply the full request size (no data portion follows).
	 */
	pt->sgl_offset = (uint16_t)pt->request_size;
}
10714
10715 /*
10716 * A set of functions to prepare an mptsas_cmd for the various
10717 * supported requests.
10718 */
static struct mptsas_func {
	U8		Function;	/* MPI2_FUNCTION_* code to match */
	char		*Name;		/* human-readable name for debug logs */
	mptsas_pre_f	*f_pre;		/* per-function pt preparation hook */
} mptsas_func_list[] = {
	{ MPI2_FUNCTION_IOC_FACTS, "IOC_FACTS", mpi_pre_ioc_facts },
	{ MPI2_FUNCTION_PORT_FACTS, "PORT_FACTS", mpi_pre_port_facts },
	{ MPI2_FUNCTION_FW_DOWNLOAD, "FW_DOWNLOAD", mpi_pre_fw_download },
	{ MPI2_FUNCTION_FW_UPLOAD, "FW_UPLOAD", mpi_pre_fw_upload },
	{ MPI2_FUNCTION_SATA_PASSTHROUGH, "SATA_PASSTHROUGH",
	    mpi_pre_sata_passthrough },
	{ MPI2_FUNCTION_SMP_PASSTHROUGH, "SMP_PASSTHROUGH",
	    mpi_pre_smp_passthrough},
	{ MPI2_FUNCTION_SCSI_IO_REQUEST, "SCSI_IO_REQUEST",
	    mpi_pre_scsi_io_req},
	{ MPI2_FUNCTION_CONFIG, "CONFIG", mpi_pre_config},
	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, "SAS_IO_UNIT_CONTROL",
	    mpi_pre_sas_io_unit_control },
	/* Sentinel: f_pre == NULL terminates the lookup loop. */
	{ 0xFF, NULL, NULL } /* list end */
};
10739
10740 static void
10741 mptsas_prep_sgl_offset(mptsas_t *mpt, mptsas_pt_request_t *pt)
10742 {
10743 pMPI2RequestHeader_t hdr;
10744 struct mptsas_func *f;
10745
10746 hdr = (pMPI2RequestHeader_t)pt->request;
10747
10748 for (f = mptsas_func_list; f->f_pre != NULL; f++) {
10749 if (hdr->Function == f->Function) {
10750 f->f_pre(mpt, pt);
10751 NDBG15(("mptsas_prep_sgl_offset: Function %s,"
10752 " sgl_offset 0x%x", f->Name,
10753 pt->sgl_offset));
10754 return;
10755 }
10756 }
10757 NDBG15(("mptsas_prep_sgl_offset: Unknown Function 0x%02x,"
10758 " returning req_size 0x%x for sgl_offset",
10759 hdr->Function, pt->request_size));
10760 pt->sgl_offset = (uint16_t)pt->request_size;
10761 }
10762
10763
/*
 * Execute one MPI pass-through request on behalf of an ioctl caller:
 * copy the request frame in from user space, stage any data-in/data-out
 * DMA buffers, run the command through the normal slot machinery, wait
 * for completion, then copy reply/sense/data back out to the caller.
 *
 * request, reply, data, dataout are user-space addresses (accessed via
 * ddi_copyin/ddi_copyout under "mode"); the *_size arguments give their
 * lengths.  direction is one of the MPTSAS_PASS_THRU_DIRECTION_* codes,
 * timeout is in seconds.  Returns 0 on success or an errno value.
 *
 * Entered and exited with m_mutex held; the mutex is dropped around the
 * user-space copies.
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint32_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t pt;
	mptsas_dma_alloc_state_t data_dma_state;
	mptsas_dma_alloc_state_t dataout_dma_state;
	caddr_t memp;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;
	uint32_t reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t request_hdrp;
	pMPI2RequestHeader_t request_msg;
	pMPI2DefaultReply_t reply_msg;
	Mpi2SCSIIOReply_t rep_msg;
	int rvalue;
	int i, status = 0, pt_flags = 0, rv = 0;
	uint8_t function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Default reply is a zeroed stack frame until an ADDRESS reply lands. */
	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	NDBG27(("mptsas_do_passthru: mode 0x%x, size 0x%x, Func 0x%x",
	    mode, request_size, request_msg->Function));
	mutex_enter(&mpt->m_mutex);

	function = request_msg->Function;
	/*
	 * Task management requests bypass the slot/DMA path entirely and
	 * are issued synchronously via mptsas_ioc_task_management().
	 */
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		pMpi2SCSITaskManagementRequest_t task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	/*
	 * Allocate a DMA buffer for the data-in side, and for a WRITE
	 * direction pre-fill it from the caller's buffer (copied one byte
	 * at a time with the mutex dropped).
	 */
	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	} else {
		bzero(&data_dma_state, sizeof (data_dma_state));
	}

	/* Same for the dedicated data-out buffer (always copied in). */
	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	} else {
		bzero(&dataout_dma_state, sizeof (dataout_dma_state));
	}

	/* Borrow a cmd/pkt pair from the IOC command pool. */
	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	/* Describe the pass-through for SGL construction at start time. */
	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.simple = 0;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;
	mptsas_prep_sgl_offset(mpt, &pt);

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private = (opaque_t)&pt;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = timeout;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * For SCSI IO requests, point the pkt CDB at the CDB embedded in
	 * the user's request frame and derive its length from the CDB
	 * group code; also reserve extended sense space for the reply.
	 */
	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		uint8_t com, cdb_group_id;
		boolean_t ret;

		pkt->pkt_cdbp = ((pMpi2SCSIIORequest_t)request_msg)->CDB.CDB32;
		com = pkt->pkt_cdbp[0];
		cdb_group_id = CDB_GROUPID(com);
		switch (cdb_group_id) {
		case CDB_GROUPID_0: cmd->cmd_cdblen = CDB_GROUP0; break;
		case CDB_GROUPID_1: cmd->cmd_cdblen = CDB_GROUP1; break;
		case CDB_GROUPID_2: cmd->cmd_cdblen = CDB_GROUP2; break;
		case CDB_GROUPID_4: cmd->cmd_cdblen = CDB_GROUP4; break;
		case CDB_GROUPID_5: cmd->cmd_cdblen = CDB_GROUP5; break;
		default:
			NDBG27(("mptsas_do_passthru: SCSI_IO, reserved "
			    "CDBGROUP 0x%x requested!", cdb_group_id));
			break;
		}

		reply_len = sizeof (MPI2_SCSI_IO_REPLY);
		sense_len = reply_size - reply_len;
		ret = mptsas_cmdarqsize(mpt, cmd, sense_len, KM_SLEEP);
		VERIFY(ret == B_TRUE);
	} else {
		reply_len = reply_size;
		sense_len = 0;
	}

	NDBG27(("mptsas_do_passthru: %s, dsz 0x%x, dosz 0x%x, replen 0x%x, "
	    "snslen 0x%x",
	    (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE)?"Write":"Read",
	    data_size, dataout_size, reply_len, sense_len));

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the completion path sets CFLAG_FINISHED. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	NDBG27(("mptsas_do_passthru: Cmd complete, flags 0x%x, rfm 0x%x "
	    "pktreason 0x%x", cmd->cmd_flags, cmd->cmd_rfm,
	    pkt->pkt_reason));

	/* Locate the request frame the command actually occupied. */
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
	}

	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	/* Copy reply, then sense (SCSI IO only), back to the caller. */
	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		function = request_hdrp->Function;
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			/* Clamp sense to the space actually reserved. */
			sense_len = cmd->cmd_extrqslen ?
			    min(sense_len, cmd->cmd_extrqslen) :
			    min(sense_len, cmd->cmd_rqslen);
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		/*
		 * NOTE(review): sense data is read from a fixed offset of
		 * 64 bytes into the request frame — presumably the ARQ
		 * buffer laid out after the request; confirm against the
		 * frame layout used by mptsas_start_passthru.
		 */
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	/* For non-WRITE directions, copy the DMA'd data-in back out. */
	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i, 1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	if (cmd) {
		/* Release any extended-sense map chunk first. */
		if (cmd->cmd_extrqslen != 0) {
			rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
			    cmd->cmd_extrqsidx + 1);
		}
		/* A slotted cmd is removed here, not returned to the pool. */
		if (cmd->cmd_flags & CFLAG_PREPARED) {
			mptsas_remove_cmd(mpt, cmd);
			pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
		}
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	/* A timed-out command leaves the IOC in an unknown state: reset it. */
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);
	NDBG27(("mptsas_do_passthru: Done status 0x%x", status));

	return (status);
}
11124
11125 static int
11126 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
11127 {
11128 /*
11129 * If timeout is 0, set timeout to default of 60 seconds.
11130 */
11131 if (data->Timeout == 0) {
11132 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
11133 }
11134
11135 if (((data->DataSize == 0) &&
11136 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
11137 ((data->DataSize != 0) &&
11138 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
11139 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
11140 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
11141 (data->DataOutSize != 0))))) {
11142 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
11143 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
11144 } else {
11145 data->DataOutSize = 0;
11146 }
11147 /*
11148 * Send passthru request messages
11149 */
11150 return (mptsas_do_passthru(mpt,
11151 (uint8_t *)((uintptr_t)data->PtrRequest),
11152 (uint8_t *)((uintptr_t)data->PtrReply),
11153 (uint8_t *)((uintptr_t)data->PtrData),
11154 data->RequestSize, data->ReplySize,
11155 data->DataSize, data->DataDirection,
11156 (uint8_t *)((uintptr_t)data->PtrDataOut),
11157 data->DataOutSize, data->Timeout, mode));
11158 } else {
11159 return (EINVAL);
11160 }
11161 }
11162
11163 static uint8_t
11164 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
11165 {
11166 uint8_t index;
11167
11168 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
11169 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
11170 return (index);
11171 }
11172 }
11173
11174 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
11175 }
11176
/*
 * Build the DIAG_BUFFER_POST or DIAG_RELEASE request frame for the
 * command's slot and issue it to the IOC.  Which request to build, and
 * the buffer it refers to, come from the mptsas_diag_request_t hung off
 * pkt_ha_private by the caller.  Called with m_mutex held.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
	struct scsi_pkt			*pkt = cmd->cmd_pkt;
	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
	uint32_t			i;
	uint64_t			request_desc;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* Copy the caller-provided product-specific words. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* Split the 64-bit DMA address into low/high 32-bit halves. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/* DIAG_RELEASE only needs the function and buffer type. */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	/* Default-type request descriptor: slot number in the upper half. */
	request_desc = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	/* NOTE(review): cmd_rfm holds an address value; NULL used as 0. */
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc);
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
11249
/*
 * Post a FW diagnostic buffer to the IOC: issue a DIAG_BUFFER_POST for
 * pBuffer and, on success, mark the buffer valid and owned by firmware.
 * *return_code receives an MPTSAS_FW_DIAG_ERROR_* code for the ioctl
 * caller; the function itself returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t		diag;
	int				status, slot_num, post_flags = 0;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	pMpi2DiagBufferPostReply_t	reply;
	uint16_t			iocstatus;
	uint32_t			iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* mptsas_start_diag() reads these via pkt_ha_private. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the completion path sets CFLAG_FINISHED. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* A slotted cmd is removed here, not returned to the pool. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11400
/*
 * Ask the IOC to release a FW diagnostic buffer via DIAG_RELEASE.  On
 * success *return_code is set to MPTSAS_FW_DIAG_ERROR_SUCCESS, and for
 * an UNREGISTER diag_type the buffer's unique ID is cleared so the slot
 * can be re-registered.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t	diag;
	int			status, slot_num, rel_flags = 0;
	mptsas_cmd_t		*cmd = NULL;
	struct scsi_pkt		*pkt;
	pMpi2DiagReleaseReply_t	reply;
	uint16_t		iocstatus;
	uint32_t		iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* mptsas_start_diag() reads these via pkt_ha_private. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the completion path sets CFLAG_FINISHED. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* A slotted cmd is removed here, not returned to the pool. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11560
/*
 * Register a FW diagnostic buffer on behalf of an ioctl caller:
 * validate the buffer type and unique ID, allocate a contiguous DMA
 * buffer of the requested size, and post it to the IOC.  *return_code
 * receives an MPTSAS_FW_DIAG_ERROR_* code; returns DDI_SUCCESS or
 * DDI_FAILURE.  Called with m_mutex held.
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				extended_type, buffer_type, i;
	uint32_t			buffer_size;
	uint32_t			unique_id;
	int				status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	extended_type = diag_register->ExtendedType;
	buffer_type = diag_register->BufferType;
	buffer_size = diag_register->RequestedBufferSize;
	unique_id = diag_register->UniqueId;

	/*
	 * Check for valid buffer type
	 */
	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
		return (DDI_FAILURE);
	}

	/*
	 * Get the current buffer and look up the unique ID. The unique ID
	 * should not be found. If it is, the ID is already in use.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	/* One buffer slot per buffer type. */
	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * The buffer's unique ID should not be registered yet, and the given
	 * unique ID cannot be 0.
	 */
	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * If this buffer is already posted as immediate, just change owner.
	 */
	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		pBuffer->immediate = FALSE;
		pBuffer->unique_id = unique_id;
		return (DDI_SUCCESS);
	}

	/*
	 * Post a new buffer after checking if it's enabled. The DMA buffer
	 * that is allocated will be contiguous (sgl_len = 1).
	 */
	if (!pBuffer->enabled) {
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}
	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
	pBuffer->buffer_data.size = buffer_size;
	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
		    "diag buffer: size = %d bytes", buffer_size);
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}

	/*
	 * Copy the given info to the diag buffer and post the buffer.
	 */
	pBuffer->buffer_type = buffer_type;
	pBuffer->immediate = FALSE;
	/* Product-specific words only apply to TRACE buffers. */
	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
		    i++) {
			pBuffer->product_specific[i] =
			    diag_register->ProductSpecific[i];
		}
	}
	pBuffer->extended_type = extended_type;
	pBuffer->unique_id = unique_id;
	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
	    DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
		    "mptsas_diag_register.");
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		status = DDI_FAILURE;
	}

	/*
	 * In case there was a failure, free the DMA buffer.
	 */
	if (status == DDI_FAILURE) {
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}
11667
11668 static int
11669 mptsas_diag_unregister(mptsas_t *mpt,
11670 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
11671 {
11672 mptsas_fw_diagnostic_buffer_t *pBuffer;
11673 uint8_t i;
11674 uint32_t unique_id;
11675 int status;
11676
11677 ASSERT(mutex_owned(&mpt->m_mutex));
11678
11679 unique_id = diag_unregister->UniqueId;
11680
11681 /*
11682 * Get the current buffer and look up the unique ID. The unique ID
11683 * should be there.
11684 */
11685 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11686 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11687 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11688 return (DDI_FAILURE);
11689 }
11690
11691 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11692
11693 /*
11694 * Try to release the buffer from FW before freeing it. If release
11695 * fails, don't free the DMA buffer in case FW tries to access it
11696 * later. If buffer is not owned by firmware, can't release it.
11697 */
11698 if (!pBuffer->owned_by_firmware) {
11699 status = DDI_SUCCESS;
11700 } else {
11701 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
11702 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
11703 }
11704
11705 /*
11706 * At this point, return the current status no matter what happens with
11707 * the DMA buffer.
11708 */
11709 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
11710 if (status == DDI_SUCCESS) {
11711 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
11712 DDI_SUCCESS) {
11713 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
11714 "in mptsas_diag_unregister.");
11715 ddi_fm_service_impact(mpt->m_dip,
11716 DDI_SERVICE_UNAFFECTED);
11717 }
11718 mptsas_dma_free(&pBuffer->buffer_data);
11719 }
11720
11721 return (status);
11722 }
11723
11724 static int
11725 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
11726 uint32_t *return_code)
11727 {
11728 mptsas_fw_diagnostic_buffer_t *pBuffer;
11729 uint8_t i;
11730 uint32_t unique_id;
11731
11732 ASSERT(mutex_owned(&mpt->m_mutex));
11733
11734 unique_id = diag_query->UniqueId;
11735
11736 /*
11737 * If ID is valid, query on ID.
11738 * If ID is invalid, query on buffer type.
11739 */
11740 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
11741 i = diag_query->BufferType;
11742 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
11743 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11744 return (DDI_FAILURE);
11745 }
11746 } else {
11747 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11748 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11749 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11750 return (DDI_FAILURE);
11751 }
11752 }
11753
11754 /*
11755 * Fill query structure with the diag buffer info.
11756 */
11757 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11758 diag_query->BufferType = pBuffer->buffer_type;
11759 diag_query->ExtendedType = pBuffer->extended_type;
11760 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
11761 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
11762 i++) {
11763 diag_query->ProductSpecific[i] =
11764 pBuffer->product_specific[i];
11765 }
11766 }
11767 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
11768 diag_query->DriverAddedBufferSize = 0;
11769 diag_query->UniqueId = pBuffer->unique_id;
11770 diag_query->ApplicationFlags = 0;
11771 diag_query->DiagnosticFlags = 0;
11772
11773 /*
11774 * Set/Clear application flags
11775 */
11776 if (pBuffer->immediate) {
11777 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11778 } else {
11779 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11780 }
11781 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
11782 diag_query->ApplicationFlags |=
11783 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11784 } else {
11785 diag_query->ApplicationFlags &=
11786 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11787 }
11788 if (pBuffer->owned_by_firmware) {
11789 diag_query->ApplicationFlags |=
11790 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11791 } else {
11792 diag_query->ApplicationFlags &=
11793 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11794 }
11795
11796 return (DDI_SUCCESS);
11797 }
11798
11799 static int
11800 mptsas_diag_read_buffer(mptsas_t *mpt,
11801 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
11802 uint32_t *return_code, int ioctl_mode)
11803 {
11804 mptsas_fw_diagnostic_buffer_t *pBuffer;
11805 uint8_t i, *pData;
11806 uint32_t unique_id, byte;
11807 int status;
11808
11809 ASSERT(mutex_owned(&mpt->m_mutex));
11810
11811 unique_id = diag_read_buffer->UniqueId;
11812
11813 /*
11814 * Get the current buffer and look up the unique ID. The unique ID
11815 * should be there.
11816 */
11817 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11818 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11819 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11820 return (DDI_FAILURE);
11821 }
11822
11823 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11824
11825 /*
11826 * Make sure requested read is within limits
11827 */
11828 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
11829 pBuffer->buffer_data.size) {
11830 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11831 return (DDI_FAILURE);
11832 }
11833
11834 /*
11835 * Copy the requested data from DMA to the diag_read_buffer. The DMA
11836 * buffer that was allocated is one contiguous buffer.
11837 */
11838 pData = (uint8_t *)(pBuffer->buffer_data.memp +
11839 diag_read_buffer->StartingOffset);
11840 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
11841 DDI_DMA_SYNC_FORCPU);
11842 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
11843 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
11844 != 0) {
11845 return (DDI_FAILURE);
11846 }
11847 }
11848 diag_read_buffer->Status = 0;
11849
11850 /*
11851 * Set or clear the Force Release flag.
11852 */
11853 if (pBuffer->force_release) {
11854 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11855 } else {
11856 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11857 }
11858
11859 /*
11860 * If buffer is to be reregistered, make sure it's not already owned by
11861 * firmware first.
11862 */
11863 status = DDI_SUCCESS;
11864 if (!pBuffer->owned_by_firmware) {
11865 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
11866 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
11867 return_code);
11868 }
11869 }
11870
11871 return (status);
11872 }
11873
11874 static int
11875 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
11876 uint32_t *return_code)
11877 {
11878 mptsas_fw_diagnostic_buffer_t *pBuffer;
11879 uint8_t i;
11880 uint32_t unique_id;
11881 int status;
11882
11883 ASSERT(mutex_owned(&mpt->m_mutex));
11884
11885 unique_id = diag_release->UniqueId;
11886
11887 /*
11888 * Get the current buffer and look up the unique ID. The unique ID
11889 * should be there.
11890 */
11891 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11892 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11893 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11894 return (DDI_FAILURE);
11895 }
11896
11897 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11898
11899 /*
11900 * If buffer is not owned by firmware, it's already been released.
11901 */
11902 if (!pBuffer->owned_by_firmware) {
11903 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
11904 return (DDI_FAILURE);
11905 }
11906
11907 /*
11908 * Release the buffer.
11909 */
11910 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
11911 MPTSAS_FW_DIAG_TYPE_RELEASE);
11912 return (status);
11913 }
11914
11915 static int
11916 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
11917 uint32_t length, uint32_t *return_code, int ioctl_mode)
11918 {
11919 mptsas_fw_diag_register_t diag_register;
11920 mptsas_fw_diag_unregister_t diag_unregister;
11921 mptsas_fw_diag_query_t diag_query;
11922 mptsas_diag_read_buffer_t diag_read_buffer;
11923 mptsas_fw_diag_release_t diag_release;
11924 int status = DDI_SUCCESS;
11925 uint32_t original_return_code, read_buf_len;
11926
11927 ASSERT(mutex_owned(&mpt->m_mutex));
11928
11929 original_return_code = *return_code;
11930 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11931
11932 switch (action) {
11933 case MPTSAS_FW_DIAG_TYPE_REGISTER:
11934 if (!length) {
11935 *return_code =
11936 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11937 status = DDI_FAILURE;
11938 break;
11939 }
11940 if (ddi_copyin(diag_action, &diag_register,
11941 sizeof (diag_register), ioctl_mode) != 0) {
11942 return (DDI_FAILURE);
11943 }
11944 status = mptsas_diag_register(mpt, &diag_register,
11945 return_code);
11946 break;
11947
11948 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
11949 if (length < sizeof (diag_unregister)) {
11950 *return_code =
11951 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11952 status = DDI_FAILURE;
11953 break;
11954 }
11955 if (ddi_copyin(diag_action, &diag_unregister,
11956 sizeof (diag_unregister), ioctl_mode) != 0) {
11957 return (DDI_FAILURE);
11958 }
11959 status = mptsas_diag_unregister(mpt, &diag_unregister,
11960 return_code);
11961 break;
11962
11963 case MPTSAS_FW_DIAG_TYPE_QUERY:
11964 if (length < sizeof (diag_query)) {
11965 *return_code =
11966 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11967 status = DDI_FAILURE;
11968 break;
11969 }
11970 if (ddi_copyin(diag_action, &diag_query,
11971 sizeof (diag_query), ioctl_mode) != 0) {
11972 return (DDI_FAILURE);
11973 }
11974 status = mptsas_diag_query(mpt, &diag_query,
11975 return_code);
11976 if (status == DDI_SUCCESS) {
11977 if (ddi_copyout(&diag_query, diag_action,
11978 sizeof (diag_query), ioctl_mode) != 0) {
11979 return (DDI_FAILURE);
11980 }
11981 }
11982 break;
11983
11984 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
11985 if (ddi_copyin(diag_action, &diag_read_buffer,
11986 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
11987 return (DDI_FAILURE);
11988 }
11989 read_buf_len = sizeof (diag_read_buffer) -
11990 sizeof (diag_read_buffer.DataBuffer) +
11991 diag_read_buffer.BytesToRead;
11992 if (length < read_buf_len) {
11993 *return_code =
11994 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11995 status = DDI_FAILURE;
11996 break;
11997 }
11998 status = mptsas_diag_read_buffer(mpt,
11999 &diag_read_buffer, diag_action +
12000 sizeof (diag_read_buffer) - 4, return_code,
12001 ioctl_mode);
12002 if (status == DDI_SUCCESS) {
12003 if (ddi_copyout(&diag_read_buffer, diag_action,
12004 sizeof (diag_read_buffer) - 4, ioctl_mode)
12005 != 0) {
12006 return (DDI_FAILURE);
12007 }
12008 }
12009 break;
12010
12011 case MPTSAS_FW_DIAG_TYPE_RELEASE:
12012 if (length < sizeof (diag_release)) {
12013 *return_code =
12014 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12015 status = DDI_FAILURE;
12016 break;
12017 }
12018 if (ddi_copyin(diag_action, &diag_release,
12019 sizeof (diag_release), ioctl_mode) != 0) {
12020 return (DDI_FAILURE);
12021 }
12022 status = mptsas_diag_release(mpt, &diag_release,
12023 return_code);
12024 break;
12025
12026 default:
12027 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12028 status = DDI_FAILURE;
12029 break;
12030 }
12031
12032 if ((status == DDI_FAILURE) &&
12033 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
12034 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
12035 status = DDI_SUCCESS;
12036 }
12037
12038 return (status);
12039 }
12040
12041 static int
12042 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
12043 {
12044 int status;
12045 mptsas_diag_action_t driver_data;
12046
12047 ASSERT(mutex_owned(&mpt->m_mutex));
12048
12049 /*
12050 * Copy the user data to a driver data buffer.
12051 */
12052 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
12053 mode) == 0) {
12054 /*
12055 * Send diag action request if Action is valid
12056 */
12057 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
12058 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
12059 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
12060 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
12061 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
12062 status = mptsas_do_diag_action(mpt, driver_data.Action,
12063 (void *)(uintptr_t)driver_data.PtrDiagAction,
12064 driver_data.Length, &driver_data.ReturnCode,
12065 mode);
12066 if (status == DDI_SUCCESS) {
12067 if (ddi_copyout(&driver_data.ReturnCode,
12068 &user_data->ReturnCode,
12069 sizeof (user_data->ReturnCode), mode)
12070 != 0) {
12071 status = EFAULT;
12072 } else {
12073 status = 0;
12074 }
12075 } else {
12076 status = EIO;
12077 }
12078 } else {
12079 status = EINVAL;
12080 }
12081 } else {
12082 status = EFAULT;
12083 }
12084
12085 return (status);
12086 }
12087
12088 /*
12089 * This routine handles the "event query" ioctl.
12090 */
12091 static int
12092 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
12093 int *rval)
12094 {
12095 int status;
12096 mptsas_event_query_t driverdata;
12097 uint8_t i;
12098
12099 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
12100
12101 mutex_enter(&mpt->m_mutex);
12102 for (i = 0; i < 4; i++) {
12103 driverdata.Types[i] = mpt->m_event_mask[i];
12104 }
12105 mutex_exit(&mpt->m_mutex);
12106
12107 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
12108 status = EFAULT;
12109 } else {
12110 *rval = MPTIOCTL_STATUS_GOOD;
12111 status = 0;
12112 }
12113
12114 return (status);
12115 }
12116
12117 /*
12118 * This routine handles the "event enable" ioctl.
12119 */
12120 static int
12121 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
12122 int *rval)
12123 {
12124 int status;
12125 mptsas_event_enable_t driverdata;
12126 uint8_t i;
12127
12128 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12129 mutex_enter(&mpt->m_mutex);
12130 for (i = 0; i < 4; i++) {
12131 mpt->m_event_mask[i] = driverdata.Types[i];
12132 }
12133 mutex_exit(&mpt->m_mutex);
12134
12135 *rval = MPTIOCTL_STATUS_GOOD;
12136 status = 0;
12137 } else {
12138 status = EFAULT;
12139 }
12140 return (status);
12141 }
12142
/*
 * This routine handles the "event report" ioctl: it copies the driver's
 * cached event records (mpt->m_events) out to the caller, provided the
 * caller's buffer (data->Size) is large enough to hold all of them.  If
 * the caller's buffer is larger than needed, the trimmed size is written
 * back so the caller knows how much was actually filled.
 *
 * Returns 0 with *rval set to an MPTIOCTL_STATUS_* code, or EFAULT on a
 * copyin/copyout failure.
 */
static int
mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
    int *rval)
{
	int			status;
	mptsas_event_report_t	driverdata;

	mutex_enter(&mpt->m_mutex);

	/* Only the Size field is needed from the caller's structure. */
	if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
	    mode) == 0) {
		if (driverdata.Size >= sizeof (mpt->m_events)) {
			/* Buffer is big enough: copy out the whole cache. */
			if (ddi_copyout(mpt->m_events, data->Events,
			    sizeof (mpt->m_events), mode) != 0) {
				status = EFAULT;
			} else {
				/*
				 * If the caller offered more space than was
				 * used, report the actual size back.
				 */
				if (driverdata.Size > sizeof (mpt->m_events)) {
					driverdata.Size =
					    sizeof (mpt->m_events);
					if (ddi_copyout(&driverdata.Size,
					    &data->Size,
					    sizeof (driverdata.Size),
					    mode) != 0) {
						status = EFAULT;
					} else {
						*rval = MPTIOCTL_STATUS_GOOD;
						status = 0;
					}
				} else {
					*rval = MPTIOCTL_STATUS_GOOD;
					status = 0;
				}
			}
		} else {
			/* Caller's buffer cannot hold the event cache. */
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
			status = 0;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
12190
12191 static void
12192 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12193 {
12194 int *reg_data;
12195 uint_t reglen;
12196
12197 /*
12198 * Lookup the 'reg' property and extract the other data
12199 */
12200 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12201 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
12202 DDI_PROP_SUCCESS) {
12203 /*
12204 * Extract the PCI data from the 'reg' property first DWORD.
12205 * The entry looks like the following:
12206 * First DWORD:
12207 * Bits 0 - 7 8-bit Register number
12208 * Bits 8 - 10 3-bit Function number
12209 * Bits 11 - 15 5-bit Device number
12210 * Bits 16 - 23 8-bit Bus number
12211 * Bits 24 - 25 2-bit Address Space type identifier
12212 *
12213 */
12214 adapter_data->PciInformation.u.bits.BusNumber =
12215 (reg_data[0] & 0x00FF0000) >> 16;
12216 adapter_data->PciInformation.u.bits.DeviceNumber =
12217 (reg_data[0] & 0x0000F800) >> 11;
12218 adapter_data->PciInformation.u.bits.FunctionNumber =
12219 (reg_data[0] & 0x00000700) >> 8;
12220 ddi_prop_free((void *)reg_data);
12221 } else {
12222 /*
12223 * If we can't determine the PCI data then we fill in FF's for
12224 * the data to indicate this.
12225 */
12226 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
12227 adapter_data->MpiPortNumber = 0xFFFFFFFF;
12228 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
12229 }
12230
12231 /*
12232 * Saved in the mpt->m_fwversion
12233 */
12234 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
12235 }
12236
12237 static void
12238 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12239 {
12240 char *driver_verstr = MPTSAS_MOD_STRING;
12241
12242 mptsas_lookup_pci_data(mpt, adapter_data);
12243 adapter_data->AdapterType = mpt->m_MPI25 ?
12244 MPTIOCTL_ADAPTER_TYPE_SAS3 :
12245 MPTIOCTL_ADAPTER_TYPE_SAS2;
12246 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
12247 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
12248 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
12249 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
12250 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
12251 adapter_data->BiosVersion = 0;
12252 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
12253 }
12254
12255 static void
12256 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
12257 {
12258 int *reg_data, i;
12259 uint_t reglen;
12260
12261 /*
12262 * Lookup the 'reg' property and extract the other data
12263 */
12264 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12265 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
12266 DDI_PROP_SUCCESS) {
12267 /*
12268 * Extract the PCI data from the 'reg' property first DWORD.
12269 * The entry looks like the following:
12270 * First DWORD:
12271 * Bits 8 - 10 3-bit Function number
12272 * Bits 11 - 15 5-bit Device number
12273 * Bits 16 - 23 8-bit Bus number
12274 */
12275 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
12276 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
12277 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
12278 ddi_prop_free((void *)reg_data);
12279 } else {
12280 /*
12281 * If we can't determine the PCI info then we fill in FF's for
12282 * the data to indicate this.
12283 */
12284 pci_info->BusNumber = 0xFFFFFFFF;
12285 pci_info->DeviceNumber = 0xFF;
12286 pci_info->FunctionNumber = 0xFF;
12287 }
12288
12289 /*
12290 * Now get the interrupt vector and the pci header. The vector can
12291 * only be 0 right now. The header is the first 256 bytes of config
12292 * space.
12293 */
12294 pci_info->InterruptVector = 0;
12295 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
12296 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
12297 i);
12298 }
12299 }
12300
/*
 * Handle the MPTIOCTL_REG_ACCESS ioctl: read or write a 32-bit controller
 * register through the memory-mapped register window (mpt->m_reg).  I/O
 * space access is rejected.
 *
 * Note that RegOffset is applied via pointer arithmetic on a uint32_t
 * pointer, so it is an index of 32-bit words into the register window,
 * not a byte offset.  No bounds check is applied to RegOffset here —
 * presumably the ioctl is trusted (root-only); NOTE(review): confirm.
 *
 * Returns 0 on success or an errno (EFAULT, EINVAL).
 */
static int
mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
{
	int			status = 0;
	mptsas_reg_access_t	driverdata;

	mutex_enter(&mpt->m_mutex);
	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
		switch (driverdata.Command) {
		/*
		 * IO access is not supported.
		 */
		case REG_IO_READ:
		case REG_IO_WRITE:
			mptsas_log(mpt, CE_WARN, "IO access is not "
			    "supported.  Use memory access.");
			status = EINVAL;
			break;

		case REG_MEM_READ:
			/* 32-bit read at the word-indexed offset. */
			driverdata.RegData = ddi_get32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset);
			if (ddi_copyout(&driverdata.RegData,
			    &data->RegData,
			    sizeof (driverdata.RegData), mode) != 0) {
				mptsas_log(mpt, CE_WARN, "Register "
				    "Read Failed");
				status = EFAULT;
			}
			break;

		case REG_MEM_WRITE:
			/* 32-bit write at the word-indexed offset. */
			ddi_put32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset,
			    driverdata.RegData);
			break;

		default:
			status = EINVAL;
			break;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
12351
12352 static int
12353 led_control(mptsas_t *mpt, intptr_t data, int mode)
12354 {
12355 int ret = 0;
12356 mptsas_led_control_t lc;
12357 mptsas_target_t *ptgt;
12358
12359 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
12360 return (EFAULT);
12361 }
12362
12363 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
12364 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
12365 lc.Led < MPTSAS_LEDCTL_LED_MIN ||
12366 lc.Led > MPTSAS_LEDCTL_LED_MAX ||
12367 (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
12368 lc.LedStatus != 1)) {
12369 return (EINVAL);
12370 }
12371
12372 if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
12373 (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
12374 return (EACCES);
12375
12376 /* Locate the target we're interrogating... */
12377 mutex_enter(&mpt->m_mutex);
12378 ptgt = refhash_linear_search(mpt->m_targets,
12379 mptsas_target_eval_slot, &lc);
12380 if (ptgt == NULL) {
12381 /* We could not find a target for that enclosure/slot. */
12382 mutex_exit(&mpt->m_mutex);
12383 return (ENOENT);
12384 }
12385
12386 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
12387 /* Update our internal LED state. */
12388 ptgt->m_led_status &= ~(1 << (lc.Led - 1));
12389 ptgt->m_led_status |= lc.LedStatus << (lc.Led - 1);
12390
12391 /* Flush it to the controller. */
12392 ret = mptsas_flush_led_status(mpt, ptgt);
12393 mutex_exit(&mpt->m_mutex);
12394 return (ret);
12395 }
12396
12397 /* Return our internal LED state. */
12398 lc.LedStatus = (ptgt->m_led_status >> (lc.Led - 1)) & 1;
12399 mutex_exit(&mpt->m_mutex);
12400
12401 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12402 return (EFAULT);
12403 }
12404
12405 return (0);
12406 }
12407
/*
 * Handle the MPTIOCTL_GET_DISK_INFO ioctl: report the number of known
 * targets and, if the caller supplied a large-enough array, per-target
 * info (instance, enclosure, slot, SAS address).
 *
 * The STRUCT_* macros handle both 32- and 64-bit application data models
 * (get_udatamodel()).  The target count is sampled without holding the
 * lock across the allocation, so the set can change in between; growth is
 * detected and reported as EAGAIN, shrinkage is handled by writing back
 * the smaller actual count.
 *
 * Returns 0 on success or an errno (EACCES, EFAULT, ENOSPC, EAGAIN).
 */
static int
get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
{
	uint16_t		i = 0;
	uint16_t		count = 0;
	int			ret = 0;
	mptsas_target_t		*ptgt;
	mptsas_disk_info_t	*di;
	STRUCT_DECL(mptsas_get_disk_info, gdi);

	if ((mode & FREAD) == 0)
		return (EACCES);

	STRUCT_INIT(gdi, get_udatamodel());

	if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
	    mode) != 0) {
		return (EFAULT);
	}

	/* Find out how many targets there are. */
	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		count++;
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * If we haven't been asked to copy out information on each target,
	 * then just return the count.
	 */
	STRUCT_FSET(gdi, DiskCount, count);
	if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
		goto copy_out;

	/*
	 * If we haven't been given a large enough buffer to copy out into,
	 * let the caller know.
	 */
	if (STRUCT_FGET(gdi, DiskInfoArraySize) <
	    count * sizeof (mptsas_disk_info_t)) {
		ret = ENOSPC;
		goto copy_out;
	}

	di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);

	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		if (i >= count) {
			/*
			 * The number of targets changed while we weren't
			 * looking, so give up.
			 */
			refhash_rele(mpt->m_targets, ptgt);
			mutex_exit(&mpt->m_mutex);
			kmem_free(di, count * sizeof (mptsas_disk_info_t));
			return (EAGAIN);
		}
		di[i].Instance = mpt->m_instance;
		di[i].Enclosure = ptgt->m_enclosure;
		di[i].Slot = ptgt->m_slot_num;
		di[i].SasAddress = ptgt->m_addr.mta_wwn;
		i++;
	}
	mutex_exit(&mpt->m_mutex);
	/* i may be smaller than count if targets disappeared; report i. */
	STRUCT_FSET(gdi, DiskCount, i);

	/* Copy out the disk information to the caller. */
	if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
	    i * sizeof (mptsas_disk_info_t), mode) != 0) {
		ret = EFAULT;
	}

	kmem_free(di, count * sizeof (mptsas_disk_info_t));

copy_out:
	/* Write the (possibly updated) header back to the caller. */
	if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
	    mode) != 0) {
		ret = EFAULT;
	}

	return (ret);
}
12494
12495 static int
12496 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
12497 int *rval)
12498 {
12499 int status = 0;
12500 mptsas_t *mpt;
12501 mptsas_update_flash_t flashdata;
12502 mptsas_pass_thru_t passthru_data;
12503 mptsas_adapter_data_t adapter_data;
12504 mptsas_pci_info_t pci_info;
12505 int copylen;
12506
12507 int iport_flag = 0;
12508 dev_info_t *dip = NULL;
12509 mptsas_phymask_t phymask = 0;
12510 struct devctl_iocdata *dcp = NULL;
12511 char *addr = NULL;
12512 mptsas_target_t *ptgt = NULL;
12513
12514 *rval = MPTIOCTL_STATUS_GOOD;
12515 if (secpolicy_sys_config(credp, B_FALSE) != 0) {
12516 return (EPERM);
12517 }
12518
12519 mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
12520 if (mpt == NULL) {
12521 /*
12522 * Called from iport node, get the states
12523 */
12524 iport_flag = 1;
12525 dip = mptsas_get_dip_from_dev(dev, &phymask);
12526 if (dip == NULL) {
12527 return (ENXIO);
12528 }
12529 mpt = DIP2MPT(dip);
12530 }
12531 /* Make sure power level is D0 before accessing registers */
12532 mutex_enter(&mpt->m_mutex);
12533 if (mpt->m_options & MPTSAS_OPT_PM) {
12534 (void) pm_busy_component(mpt->m_dip, 0);
12535 if (mpt->m_power_level != PM_LEVEL_D0) {
12536 mutex_exit(&mpt->m_mutex);
12537 if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
12538 DDI_SUCCESS) {
12539 mptsas_log(mpt, CE_WARN,
12540 "mptsas%d: mptsas_ioctl: Raise power "
12541 "request failed.", mpt->m_instance);
12542 (void) pm_idle_component(mpt->m_dip, 0);
12543 return (ENXIO);
12544 }
12545 } else {
12546 mutex_exit(&mpt->m_mutex);
12547 }
12548 } else {
12549 mutex_exit(&mpt->m_mutex);
12550 }
12551
12552 if (iport_flag) {
12553 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
12554 if (status != 0) {
12555 goto out;
12556 }
12557 /*
12558 * The following code control the OK2RM LED, it doesn't affect
12559 * the ioctl return status.
12560 */
12561 if ((cmd == DEVCTL_DEVICE_ONLINE) ||
12562 (cmd == DEVCTL_DEVICE_OFFLINE)) {
12563 if (ndi_dc_allochdl((void *)data, &dcp) !=
12564 NDI_SUCCESS) {
12565 goto out;
12566 }
12567 addr = ndi_dc_getaddr(dcp);
12568 ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
12569 if (ptgt == NULL) {
12570 NDBG14(("mptsas_ioctl led control: tgt %s not "
12571 "found", addr));
12572 ndi_dc_freehdl(dcp);
12573 goto out;
12574 }
12575 mutex_enter(&mpt->m_mutex);
12576 if (cmd == DEVCTL_DEVICE_ONLINE) {
12577 ptgt->m_tgt_unconfigured = 0;
12578 } else if (cmd == DEVCTL_DEVICE_OFFLINE) {
12579 ptgt->m_tgt_unconfigured = 1;
12580 }
12581 if (cmd == DEVCTL_DEVICE_OFFLINE) {
12582 ptgt->m_led_status |=
12583 (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
12584 } else {
12585 ptgt->m_led_status &=
12586 ~(1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
12587 }
12588 (void) mptsas_flush_led_status(mpt, ptgt);
12589 mutex_exit(&mpt->m_mutex);
12590 ndi_dc_freehdl(dcp);
12591 }
12592 goto out;
12593 }
12594 switch (cmd) {
12595 case MPTIOCTL_GET_DISK_INFO:
12596 status = get_disk_info(mpt, data, mode);
12597 break;
12598 case MPTIOCTL_LED_CONTROL:
12599 status = led_control(mpt, data, mode);
12600 break;
12601 case MPTIOCTL_UPDATE_FLASH:
12602 if (ddi_copyin((void *)data, &flashdata,
12603 sizeof (struct mptsas_update_flash), mode)) {
12604 status = EFAULT;
12605 break;
12606 }
12607
12608 mutex_enter(&mpt->m_mutex);
12609 if (mptsas_update_flash(mpt,
12610 (caddr_t)(long)flashdata.PtrBuffer,
12611 flashdata.ImageSize, flashdata.ImageType, mode)) {
12612 status = EFAULT;
12613 }
12614
12615 /*
12616 * Reset the chip to start using the new
12617 * firmware. Reset if failed also.
12618 */
12619 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12620 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
12621 status = EFAULT;
12622 }
12623 mutex_exit(&mpt->m_mutex);
12624 break;
12625 case MPTIOCTL_PASS_THRU:
12626 /*
12627 * The user has requested to pass through a command to
12628 * be executed by the MPT firmware. Call our routine
12629 * which does this. Only allow one passthru IOCTL at
12630 * one time. Other threads will block on
12631 * m_passthru_mutex, which is of adaptive variant.
12632 */
12633 if (ddi_copyin((void *)data, &passthru_data,
12634 sizeof (mptsas_pass_thru_t), mode)) {
12635 status = EFAULT;
12636 break;
12637 }
12638 mutex_enter(&mpt->m_passthru_mutex);
12639 mutex_enter(&mpt->m_mutex);
12640 status = mptsas_pass_thru(mpt, &passthru_data, mode);
12641 mutex_exit(&mpt->m_mutex);
12642 mutex_exit(&mpt->m_passthru_mutex);
12643
12644 break;
12645 case MPTIOCTL_GET_ADAPTER_DATA:
12646 /*
12647 * The user has requested to read adapter data. Call
12648 * our routine which does this.
12649 */
12650 bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
12651 if (ddi_copyin((void *)data, (void *)&adapter_data,
12652 sizeof (mptsas_adapter_data_t), mode)) {
12653 status = EFAULT;
12654 break;
12655 }
12656 if (adapter_data.StructureLength >=
12657 sizeof (mptsas_adapter_data_t)) {
12658 adapter_data.StructureLength = (uint32_t)
12659 sizeof (mptsas_adapter_data_t);
12660 copylen = sizeof (mptsas_adapter_data_t);
12661 mutex_enter(&mpt->m_mutex);
12662 mptsas_read_adapter_data(mpt, &adapter_data);
12663 mutex_exit(&mpt->m_mutex);
12664 } else {
12665 adapter_data.StructureLength = (uint32_t)
12666 sizeof (mptsas_adapter_data_t);
12667 copylen = sizeof (adapter_data.StructureLength);
12668 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
12669 }
12670 if (ddi_copyout((void *)(&adapter_data), (void *)data,
12671 copylen, mode) != 0) {
12672 status = EFAULT;
12673 }
12674 break;
12675 case MPTIOCTL_GET_PCI_INFO:
12676 /*
12677 * The user has requested to read pci info. Call
12678 * our routine which does this.
12679 */
12680 bzero(&pci_info, sizeof (mptsas_pci_info_t));
12681 mutex_enter(&mpt->m_mutex);
12682 mptsas_read_pci_info(mpt, &pci_info);
12683 mutex_exit(&mpt->m_mutex);
12684 if (ddi_copyout((void *)(&pci_info), (void *)data,
12685 sizeof (mptsas_pci_info_t), mode) != 0) {
12686 status = EFAULT;
12687 }
12688 break;
12689 case MPTIOCTL_RESET_ADAPTER:
12690 mutex_enter(&mpt->m_mutex);
12691 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12692 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
12693 mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
12694 "failed");
12695 status = EFAULT;
12696 }
12697 mutex_exit(&mpt->m_mutex);
12698 break;
12699 case MPTIOCTL_DIAG_ACTION:
12700 /*
12701 * The user has done a diag buffer action. Call our
12702 * routine which does this. Only allow one diag action
12703 * at one time.
12704 */
12705 mutex_enter(&mpt->m_mutex);
12706 if (mpt->m_diag_action_in_progress) {
12707 mutex_exit(&mpt->m_mutex);
12708 return (EBUSY);
12709 }
12710 mpt->m_diag_action_in_progress = 1;
12711 status = mptsas_diag_action(mpt,
12712 (mptsas_diag_action_t *)data, mode);
12713 mpt->m_diag_action_in_progress = 0;
12714 mutex_exit(&mpt->m_mutex);
12715 break;
12716 case MPTIOCTL_EVENT_QUERY:
12717 /*
12718 * The user has done an event query. Call our routine
12719 * which does this.
12720 */
12721 status = mptsas_event_query(mpt,
12722 (mptsas_event_query_t *)data, mode, rval);
12723 break;
12724 case MPTIOCTL_EVENT_ENABLE:
12725 /*
12726 * The user has done an event enable. Call our routine
12727 * which does this.
12728 */
12729 status = mptsas_event_enable(mpt,
12730 (mptsas_event_enable_t *)data, mode, rval);
12731 break;
12732 case MPTIOCTL_EVENT_REPORT:
12733 /*
12734 * The user has done an event report. Call our routine
12735 * which does this.
12736 */
12737 status = mptsas_event_report(mpt,
12738 (mptsas_event_report_t *)data, mode, rval);
12739 break;
12740 case MPTIOCTL_REG_ACCESS:
12741 /*
12742 * The user has requested register access. Call our
12743 * routine which does this.
12744 */
12745 status = mptsas_reg_access(mpt,
12746 (mptsas_reg_access_t *)data, mode);
12747 break;
12748 default:
12749 status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
12750 rval);
12751 break;
12752 }
12753
12754 out:
12755 return (status);
12756 }
12757
12758 /* Dirty wrapper for taskq */
12759 void
12760 mptsas_handle_restart_ioc(void *mpt) {
12761 mptsas_restart_ioc((mptsas_t *) mpt);
12762 }
12763
/*
 * Reset and reinitialize the IOC, then resume I/O.
 *
 * Callers must hold m_mutex (asserted below).  While the reset runs,
 * m_in_reset is set so the I/O path can mark new commands for retry
 * instead of failing them.  Returns DDI_SUCCESS, or DDI_FAILURE when
 * mptsas_init_chip() failed; in the failure case a
 * DDI_FM_DEVICE_NO_RESPONSE ereport is posted and the FM service state
 * is marked lost.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int		rval = DDI_SUCCESS;
	mptsas_target_t	*ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD so no new commands are started on any
	 * target while the reset is in progress.
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.  FALSE: this is not the first-time
	 * (attach) initialization.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles back to MAX so targets accept commands again.
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/* Drain completed commands and resume HBA command processing. */
	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
12842
/*
 * Bring the IOC to operational state: validate the flash image, reset
 * the chip, (re)allocate request/reply resources sized from the
 * freshly-read IOC facts, and enable ports and event notification.
 *
 * first_time is TRUE only for the initial (attach-time) bring-up.  On
 * later calls, if the reset path reports MPTSAS_SUCCESS_MUR (message
 * unit reset), the existing DMA resources are still valid and setup
 * skips ahead to the "mur" label.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t	cookie;
	uint32_t		i;
	int			rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	/* After a message unit reset the existing buffers remain usable. */
	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
		goto fail;
	}
	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_sense_bufs(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_sense_bufs failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.  Each queue entry carries the low 32 bits of a reply
	 * frame's DMA address.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr & 0xffffffffu;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_ioc_enable_event_notification failed");
		goto fail;
	}

	/*
	 * FMA health checks are needed both here and in attach because
	 * chip init is called from multiple places; verify every DMA
	 * handle used above before declaring success.
	 */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
13030
/*
 * Walk the device's PCI capability list.  Power-management support is
 * recorded in m_options (MPTSAS_OPT_PM) and the PMCSR register offset
 * is saved for later use by mptsas_init_pm().  Returns FALSE when
 * there is no config-space handle, TRUE otherwise (including when the
 * device advertises no capability list).
 */
static int
mptsas_get_pci_cap(mptsas_t *mpt)
{
	ushort_t caps_ptr, cap, cap_count;

	if (mpt->m_config_handle == NULL)
		return (FALSE);
	/*
	 * Check if capabilities list is supported and if so,
	 * get initial capabilities pointer and clear bits 0,1.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
	    & PCI_STAT_CAP) {
		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
		    PCI_CONF_CAP_PTR), 4);
	} else {
		caps_ptr = PCI_CAP_NEXT_PTR_NULL;
	}

	/*
	 * Walk capabilities if supported.
	 */
	for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {

		/*
		 * Check that we haven't exceeded the maximum number of
		 * capabilities and that the pointer is in a valid range.
		 * The count limit (48) guards against a malformed,
		 * looping capability list.
		 */
		if (++cap_count > 48) {
			mptsas_log(mpt, CE_WARN,
			    "too many device capabilities.\n");
			break;
		}
		/*
		 * Pointers below 64 would land inside the standard PCI
		 * config header and are therefore invalid.
		 */
		if (caps_ptr < 64) {
			mptsas_log(mpt, CE_WARN,
			    "capabilities pointer 0x%x out of range.\n",
			    caps_ptr);
			break;
		}

		/*
		 * Get next capability and check that it is valid.
		 * For now, we only support power management.
		 */
		cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
		switch (cap) {
			case PCI_CAP_ID_PM:
				mptsas_log(mpt, CE_NOTE,
				    "?mptsas%d supports power management.\n",
				    mpt->m_instance);
				mpt->m_options |= MPTSAS_OPT_PM;

				/* Save PMCSR offset */
				mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
				break;
			/*
			 * The following capabilities are valid.  Any others
			 * will cause a message to be logged.
			 */
			case PCI_CAP_ID_VPD:
			case PCI_CAP_ID_MSI:
			case PCI_CAP_ID_PCIX:
			case PCI_CAP_ID_PCI_E:
			case PCI_CAP_ID_MSI_X:
				break;
			default:
				mptsas_log(mpt, CE_NOTE,
				    "?mptsas%d unrecognized capability "
				    "0x%x.\n", mpt->m_instance, cap);
				break;
		}

		/*
		 * Get next capabilities pointer and clear bits 0,1.
		 */
		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
		    (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
	}
	return (TRUE);
}
13111
13112 static int
13113 mptsas_init_pm(mptsas_t *mpt)
13114 {
13115 char pmc_name[16];
13116 char *pmc[] = {
13117 NULL,
13118 "0=Off (PCI D3 State)",
13119 "3=On (PCI D0 State)",
13120 NULL
13121 };
13122 uint16_t pmcsr_stat;
13123
13124 if (mptsas_get_pci_cap(mpt) == FALSE) {
13125 return (DDI_FAILURE);
13126 }
13127 /*
13128 * If PCI's capability does not support PM, then don't need
13129 * to registe the pm-components
13130 */
13131 if (!(mpt->m_options & MPTSAS_OPT_PM))
13132 return (DDI_SUCCESS);
13133 /*
13134 * If power management is supported by this chip, create
13135 * pm-components property for the power management framework
13136 */
13137 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
13138 pmc[0] = pmc_name;
13139 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
13140 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
13141 mpt->m_options &= ~MPTSAS_OPT_PM;
13142 mptsas_log(mpt, CE_WARN,
13143 "mptsas%d: pm-component property creation failed.",
13144 mpt->m_instance);
13145 return (DDI_FAILURE);
13146 }
13147
13148 /*
13149 * Power on device.
13150 */
13151 (void) pm_busy_component(mpt->m_dip, 0);
13152 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
13153 mpt->m_pmcsr_offset);
13154 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
13155 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
13156 mpt->m_instance);
13157 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
13158 PCI_PMCSR_D0);
13159 }
13160 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
13161 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
13162 return (DDI_FAILURE);
13163 }
13164 mpt->m_power_level = PM_LEVEL_D0;
13165 /*
13166 * Set pm idle delay.
13167 */
13168 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
13169 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
13170
13171 return (DDI_SUCCESS);
13172 }
13173
13174 static int
13175 mptsas_register_intrs(mptsas_t *mpt)
13176 {
13177 dev_info_t *dip;
13178 int intr_types;
13179
13180 dip = mpt->m_dip;
13181
13182 /* Get supported interrupt types */
13183 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
13184 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
13185 "failed\n");
13186 return (FALSE);
13187 }
13188
13189 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
13190
13191 /*
13192 * Try MSI, but fall back to FIXED
13193 */
13194 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
13195 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
13196 NDBG0(("Using MSI interrupt type"));
13197 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
13198 return (TRUE);
13199 }
13200 }
13201 if (intr_types & DDI_INTR_TYPE_FIXED) {
13202 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
13203 NDBG0(("Using FIXED interrupt type"));
13204 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
13205 return (TRUE);
13206 } else {
13207 NDBG0(("FIXED interrupt registration failed"));
13208 return (FALSE);
13209 }
13210 }
13211
13212 return (FALSE);
13213 }
13214
/*
 * Undo mptsas_register_intrs(): disable, unregister, and free whatever
 * interrupts were registered.
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
13220
13221 /*
13222 * mptsas_add_intrs:
13223 *
13224 * Register FIXED or MSI interrupts.
13225 */
13226 static int
13227 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
13228 {
13229 dev_info_t *dip = mpt->m_dip;
13230 int avail, actual, count = 0;
13231 int i, flag, ret;
13232
13233 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
13234
13235 /* Get number of interrupts */
13236 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
13237 if ((ret != DDI_SUCCESS) || (count <= 0)) {
13238 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
13239 "ret %d count %d\n", ret, count);
13240
13241 return (DDI_FAILURE);
13242 }
13243
13244 /* Get number of available interrupts */
13245 ret = ddi_intr_get_navail(dip, intr_type, &avail);
13246 if ((ret != DDI_SUCCESS) || (avail == 0)) {
13247 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
13248 "ret %d avail %d\n", ret, avail);
13249
13250 return (DDI_FAILURE);
13251 }
13252
13253 if (avail < count) {
13254 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
13255 "navail() returned %d", count, avail);
13256 }
13257
13258 /* Mpt only have one interrupt routine */
13259 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
13260 count = 1;
13261 }
13262
13263 /* Allocate an array of interrupt handles */
13264 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
13265 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
13266
13267 flag = DDI_INTR_ALLOC_NORMAL;
13268
13269 /* call ddi_intr_alloc() */
13270 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
13271 count, &actual, flag);
13272
13273 if ((ret != DDI_SUCCESS) || (actual == 0)) {
13274 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
13275 ret);
13276 kmem_free(mpt->m_htable, mpt->m_intr_size);
13277 return (DDI_FAILURE);
13278 }
13279
13280 /* use interrupt count returned or abort? */
13281 if (actual < count) {
13282 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
13283 count, actual);
13284 }
13285
13286 mpt->m_intr_cnt = actual;
13287
13288 /*
13289 * Get priority for first msi, assume remaining are all the same
13290 */
13291 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
13292 &mpt->m_intr_pri)) != DDI_SUCCESS) {
13293 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
13294
13295 /* Free already allocated intr */
13296 for (i = 0; i < actual; i++) {
13297 (void) ddi_intr_free(mpt->m_htable[i]);
13298 }
13299
13300 kmem_free(mpt->m_htable, mpt->m_intr_size);
13301 return (DDI_FAILURE);
13302 }
13303
13304 /* Test for high level mutex */
13305 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
13306 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
13307 "Hi level interrupt not supported\n");
13308
13309 /* Free already allocated intr */
13310 for (i = 0; i < actual; i++) {
13311 (void) ddi_intr_free(mpt->m_htable[i]);
13312 }
13313
13314 kmem_free(mpt->m_htable, mpt->m_intr_size);
13315 return (DDI_FAILURE);
13316 }
13317
13318 /* Call ddi_intr_add_handler() */
13319 for (i = 0; i < actual; i++) {
13320 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
13321 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
13322 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
13323 "failed %d\n", ret);
13324
13325 /* Free already allocated intr */
13326 for (i = 0; i < actual; i++) {
13327 (void) ddi_intr_free(mpt->m_htable[i]);
13328 }
13329
13330 kmem_free(mpt->m_htable, mpt->m_intr_size);
13331 return (DDI_FAILURE);
13332 }
13333 }
13334
13335 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
13336 != DDI_SUCCESS) {
13337 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
13338
13339 /* Free already allocated intr */
13340 for (i = 0; i < actual; i++) {
13341 (void) ddi_intr_free(mpt->m_htable[i]);
13342 }
13343
13344 kmem_free(mpt->m_htable, mpt->m_intr_size);
13345 return (DDI_FAILURE);
13346 }
13347
13348 /*
13349 * Enable interrupts
13350 */
13351 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13352 /* Call ddi_intr_block_enable() for MSI interrupts */
13353 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
13354 } else {
13355 /* Call ddi_intr_enable for MSI or FIXED interrupts */
13356 for (i = 0; i < mpt->m_intr_cnt; i++) {
13357 (void) ddi_intr_enable(mpt->m_htable[i]);
13358 }
13359 }
13360 return (DDI_SUCCESS);
13361 }
13362
13363 /*
13364 * mptsas_rem_intrs:
13365 *
13366 * Unregister FIXED or MSI interrupts
13367 */
13368 static void
13369 mptsas_rem_intrs(mptsas_t *mpt)
13370 {
13371 int i;
13372
13373 NDBG6(("mptsas_rem_intrs"));
13374
13375 /* Disable all interrupts */
13376 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13377 /* Call ddi_intr_block_disable() */
13378 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
13379 } else {
13380 for (i = 0; i < mpt->m_intr_cnt; i++) {
13381 (void) ddi_intr_disable(mpt->m_htable[i]);
13382 }
13383 }
13384
13385 /* Call ddi_intr_remove_handler() */
13386 for (i = 0; i < mpt->m_intr_cnt; i++) {
13387 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
13388 (void) ddi_intr_free(mpt->m_htable[i]);
13389 }
13390
13391 kmem_free(mpt->m_htable, mpt->m_intr_size);
13392 }
13393
13394 /*
13395 * The IO fault service error handling callback function
13396 */
13397 /*ARGSUSED*/
13398 static int
13399 mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
13400 {
13401 /*
13402 * as the driver can always deal with an error in any dma or
13403 * access handle, we can just return the fme_status value.
13404 */
13405 pci_ereport_post(dip, err, NULL);
13406 return (err->fme_status);
13407 }
13408
13409 /*
13410 * mptsas_fm_init - initialize fma capabilities and register with IO
13411 * fault services.
13412 */
13413 static void
13414 mptsas_fm_init(mptsas_t *mpt)
13415 {
13416 /*
13417 * Need to change iblock to priority for new MSI intr
13418 */
13419 ddi_iblock_cookie_t fm_ibc;
13420
13421 /* Only register with IO Fault Services if we have some capability */
13422 if (mpt->m_fm_capabilities) {
13423 /* Adjust access and dma attributes for FMA */
13424 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
13425 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13426 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13427
13428 /*
13429 * Register capabilities with IO Fault Services.
13430 * mpt->m_fm_capabilities will be updated to indicate
13431 * capabilities actually supported (not requested.)
13432 */
13433 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
13434
13435 /*
13436 * Initialize pci ereport capabilities if ereport
13437 * capable (should always be.)
13438 */
13439 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13440 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13441 pci_ereport_setup(mpt->m_dip);
13442 }
13443
13444 /*
13445 * Register error callback if error callback capable.
13446 */
13447 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13448 ddi_fm_handler_register(mpt->m_dip,
13449 mptsas_fm_error_cb, (void *) mpt);
13450 }
13451 }
13452 }
13453
13454 /*
13455 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
13456 * fault services.
13457 *
13458 */
13459 static void
13460 mptsas_fm_fini(mptsas_t *mpt)
13461 {
13462 /* Only unregister FMA capabilities if registered */
13463 if (mpt->m_fm_capabilities) {
13464
13465 /*
13466 * Un-register error callback if error callback capable.
13467 */
13468
13469 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13470 ddi_fm_handler_unregister(mpt->m_dip);
13471 }
13472
13473 /*
13474 * Release any resources allocated by pci_ereport_setup()
13475 */
13476
13477 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13478 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13479 pci_ereport_teardown(mpt->m_dip);
13480 }
13481
13482 /* Unregister from IO Fault Services */
13483 ddi_fm_fini(mpt->m_dip);
13484
13485 /* Adjust access and dma attributes for FMA */
13486 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
13487 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13488 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13489
13490 }
13491 }
13492
13493 int
13494 mptsas_check_acc_handle(ddi_acc_handle_t handle)
13495 {
13496 ddi_fm_error_t de;
13497
13498 if (handle == NULL)
13499 return (DDI_FAILURE);
13500 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
13501 return (de.fme_status);
13502 }
13503
13504 int
13505 mptsas_check_dma_handle(ddi_dma_handle_t handle)
13506 {
13507 ddi_fm_error_t de;
13508
13509 if (handle == NULL)
13510 return (DDI_FAILURE);
13511 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
13512 return (de.fme_status);
13513 }
13514
13515 void
13516 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
13517 {
13518 uint64_t ena;
13519 char buf[FM_MAX_CLASS];
13520
13521 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
13522 ena = fm_ena_generate(0, FM_ENA_FMT1);
13523 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
13524 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
13525 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
13526 }
13527 }
13528
13529 static int
13530 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13531 uint16_t *dev_handle, mptsas_target_t **pptgt)
13532 {
13533 int rval;
13534 uint32_t dev_info;
13535 uint64_t sas_wwn;
13536 mptsas_phymask_t phymask;
13537 uint8_t physport, phynum, config, disk;
13538 uint64_t devicename;
13539 uint16_t pdev_hdl;
13540 mptsas_target_t *tmp_tgt = NULL;
13541 uint16_t bay_num, enclosure, io_flags;
13542
13543 ASSERT(*pptgt == NULL);
13544
13545 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13546 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13547 &bay_num, &enclosure, &io_flags);
13548 if (rval != DDI_SUCCESS) {
13549 rval = DEV_INFO_FAIL_PAGE0;
13550 return (rval);
13551 }
13552
13553 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
13554 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13555 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
13556 rval = DEV_INFO_WRONG_DEVICE_TYPE;
13557 return (rval);
13558 }
13559
13560 /*
13561 * Check if the dev handle is for a Phys Disk. If so, set return value
13562 * and exit. Don't add Phys Disks to hash.
13563 */
13564 for (config = 0; config < mpt->m_num_raid_configs; config++) {
13565 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
13566 if (*dev_handle == mpt->m_raidconfig[config].
13567 m_physdisk_devhdl[disk]) {
13568 rval = DEV_INFO_PHYS_DISK;
13569 return (rval);
13570 }
13571 }
13572 }
13573
13574 /*
13575 * Get SATA Device Name from SAS device page0 for
13576 * sata device, if device name doesn't exist, set mta_wwn to
13577 * 0 for direct attached SATA. For the device behind the expander
13578 * we still can use STP address assigned by expander.
13579 */
13580 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13581 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13582 mutex_exit(&mpt->m_mutex);
13583 /* alloc a tmp_tgt to send the cmd */
13584 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
13585 KM_SLEEP);
13586 tmp_tgt->m_devhdl = *dev_handle;
13587 tmp_tgt->m_deviceinfo = dev_info;
13588 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
13589 tmp_tgt->m_qfull_retry_interval =
13590 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
13591 tmp_tgt->m_t_throttle = MAX_THROTTLE;
13592 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13593 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
13594 mutex_enter(&mpt->m_mutex);
13595 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13596 sas_wwn = devicename;
13597 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13598 sas_wwn = 0;
13599 }
13600 }
13601
13602 phymask = mptsas_physport_to_phymask(mpt, physport);
13603 *pptgt = mptsas_tgt_alloc(mpt, *dev_handle, sas_wwn,
13604 dev_info, phymask, phynum);
13605 if (*pptgt == NULL) {
13606 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
13607 "structure!");
13608 rval = DEV_INFO_FAIL_ALLOC;
13609 return (rval);
13610 }
13611 (*pptgt)->m_io_flags = io_flags;
13612 (*pptgt)->m_enclosure = enclosure;
13613 (*pptgt)->m_slot_num = bay_num;
13614 return (DEV_INFO_SUCCESS);
13615 }
13616
/*
 * Fetch the NAA-format GUID (WWN) of a SATA device by issuing an
 * INQUIRY for VPD page 0x83 and decoding the first designation
 * descriptor.  Returns the GUID, or 0 when the device has no NAA GUID
 * or the inquiry fails.  Retries up to three times, one second apart,
 * while the page data is not yet ready.
 */
uint64_t
mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
{
	uint64_t	sata_guid = 0, *pwwn = NULL;
	int		target = ptgt->m_devhdl;
	uchar_t		*inq83 = NULL;
	int		inq83_len = 0xFF;
	uchar_t		*dblk = NULL;
	int		inq83_retry = 3;
	int		rval = DDI_FAILURE;

	inq83	= kmem_zalloc(inq83_len, KM_SLEEP);

inq83_retry:
	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
	    inq83_len, NULL, 1);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
		    "0x83 for target:%x, lun:%x failed!", target, lun);
		goto out;
	}
	/* According to SAT2, the first descriptor is logic unit name */
	dblk = &inq83[4];
	/* Association bits (byte 1, bits 4-5) must be 0 = LUN-associated. */
	if ((dblk[1] & 0x30) != 0) {
		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
		goto out;
	}
	pwwn = (uint64_t *)(void *)(&dblk[4]);
	if ((dblk[4] & 0xf0) == 0x50) {
		/* NAA 5 identifier: big-endian 8-byte WWN. */
		sata_guid = BE_64(*pwwn);
		goto out;
	} else if (dblk[4] == 'A') {
		/* ASCII designator ("ATA...") — no NAA-format GUID. */
		NDBG20(("SATA drive has no NAA format GUID."));
		goto out;
	} else {
		/* The data is not ready, wait and retry */
		inq83_retry--;
		if (inq83_retry <= 0) {
			goto out;
		}
		NDBG20(("The GUID is not ready, retry..."));
		delay(1 * drv_usectohz(1000000));
		goto inq83_retry;
	}
out:
	kmem_free(inq83, inq83_len);
	return (sata_guid);
}
13665
/*
 * Issue a SCSI INQUIRY (optionally an EVPD page) to the given target
 * and copy the response into buf.  When reallen is non-NULL it is set
 * to the number of bytes actually returned (len minus the residual).
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
    unsigned char *buf, int len, int *reallen, uchar_t evpd)
{
	uchar_t			cdb[CDB_GROUP0];
	struct scsi_address	ap;
	struct buf		*data_bp = NULL;
	int			resid = 0;
	int			ret = DDI_FAILURE;

	ASSERT(len <= 0xffff);

	/*
	 * a_target is not used to route the command; the real target is
	 * passed through ptgt in mptsas_send_scsi_cmd().
	 */
	ap.a_target = MPTSAS_INVALID_DEVHDL;
	ap.a_lun = (uchar_t)(lun);
	ap.a_hba_tran = mpt->m_tran;

	/* DMA-able buffer for the INQUIRY response. */
	data_bp = scsi_alloc_consistent_buf(&ap,
	    (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
	if (data_bp == NULL) {
		return (ret);
	}
	/* Build the 6-byte INQUIRY CDB; bytes 3-4 carry the length. */
	bzero(cdb, CDB_GROUP0);
	cdb[0] = SCMD_INQUIRY;
	cdb[1] = evpd;
	cdb[2] = page;
	cdb[3] = (len & 0xff00) >> 8;
	cdb[4] = (len & 0x00ff);
	cdb[5] = 0;

	ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
	    &resid);
	if (ret == DDI_SUCCESS) {
		if (reallen) {
			*reallen = len - resid;
		}
		/* Note: the full len bytes are copied even on short reads. */
		bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
	}
	if (data_bp) {
		scsi_free_consistent_buf(data_bp);
	}
	return (ret);
}
13708
13709 static int
13710 mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
13711 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
13712 int *resid)
13713 {
13714 struct scsi_pkt *pktp = NULL;
13715 scsi_hba_tran_t *tran_clone = NULL;
13716 mptsas_tgt_private_t *tgt_private = NULL;
13717 int ret = DDI_FAILURE;
13718
13719 /*
13720 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
13721 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
13722 * to simulate the cmds from sd
13723 */
13724 tran_clone = kmem_alloc(
13725 sizeof (scsi_hba_tran_t), KM_SLEEP);
13726 if (tran_clone == NULL) {
13727 goto out;
13728 }
13729 bcopy((caddr_t)mpt->m_tran,
13730 (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
13731 tgt_private = kmem_alloc(
13732 sizeof (mptsas_tgt_private_t), KM_SLEEP);
13733 if (tgt_private == NULL) {
13734 goto out;
13735 }
13736 tgt_private->t_lun = ap->a_lun;
13737 tgt_private->t_private = ptgt;
13738 tran_clone->tran_tgt_private = tgt_private;
13739 ap->a_hba_tran = tran_clone;
13740
13741 pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
13742 data_bp, cdblen, sizeof (struct scsi_arq_status),
13743 0, PKT_CONSISTENT, NULL, NULL);
13744 if (pktp == NULL) {
13745 goto out;
13746 }
13747 bcopy(cdb, pktp->pkt_cdbp, cdblen);
13748 pktp->pkt_flags = FLAG_NOPARITY;
13749 if (scsi_poll(pktp) < 0) {
13750 goto out;
13751 }
13752 if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
13753 goto out;
13754 }
13755 if (resid != NULL) {
13756 *resid = pktp->pkt_resid;
13757 }
13758
13759 ret = DDI_SUCCESS;
13760 out:
13761 if (pktp) {
13762 scsi_destroy_pkt(pktp);
13763 }
13764 if (tran_clone) {
13765 kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
13766 }
13767 if (tgt_private) {
13768 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
13769 }
13770 return (ret);
13771 }
13772 static int
13773 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
13774 {
13775 char *cp = NULL;
13776 char *ptr = NULL;
13777 size_t s = 0;
13778 char *wwid_str = NULL;
13779 char *lun_str = NULL;
13780 long lunnum;
13781 long phyid = -1;
13782 int rc = DDI_FAILURE;
13783
13784 ptr = name;
13785 ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
13786 ptr++;
13787 if ((cp = strchr(ptr, ',')) == NULL) {
13788 return (DDI_FAILURE);
13789 }
13790
13791 wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13792 s = (uintptr_t)cp - (uintptr_t)ptr;
13793
13794 bcopy(ptr, wwid_str, s);
13795 wwid_str[s] = '\0';
13796
13797 ptr = ++cp;
13798
13799 if ((cp = strchr(ptr, '\0')) == NULL) {
13800 goto out;
13801 }
13802 lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13803 s = (uintptr_t)cp - (uintptr_t)ptr;
13804
13805 bcopy(ptr, lun_str, s);
13806 lun_str[s] = '\0';
13807
13808 if (name[0] == 'p') {
13809 rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
13810 } else {
13811 rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
13812 }
13813 if (rc != DDI_SUCCESS)
13814 goto out;
13815
13816 if (phyid != -1) {
13817 ASSERT(phyid < MPTSAS_MAX_PHYS);
13818 *phy = (uint8_t)phyid;
13819 }
13820 rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
13821 if (rc != 0)
13822 goto out;
13823
13824 *lun = (int)lunnum;
13825 rc = DDI_SUCCESS;
13826 out:
13827 if (wwid_str)
13828 kmem_free(wwid_str, SCSI_MAXNAMELEN);
13829 if (lun_str)
13830 kmem_free(lun_str, SCSI_MAXNAMELEN);
13831
13832 return (rc);
13833 }
13834
13835 /*
13836 * mptsas_parse_smp_name() is to parse sas wwn string
13837 * which format is "wWWN"
13838 */
13839 static int
13840 mptsas_parse_smp_name(char *name, uint64_t *wwn)
13841 {
13842 char *ptr = name;
13843
13844 if (*ptr != 'w') {
13845 return (DDI_FAILURE);
13846 }
13847
13848 ptr++;
13849 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
13850 return (DDI_FAILURE);
13851 }
13852 return (DDI_SUCCESS);
13853 }
13854
/*
 * Bus-config entry point for the iport.  Handles BUS_CONFIG_ONE for
 * "smp@wWWN", "@wWWN,LUN" and "@pPHY,LUN" style unit-addresses, and
 * BUS_CONFIG_DRIVER/BUS_CONFIG_ALL via mptsas_config_all().  The
 * scsi_vhci and iport nexus nodes are held across the operation.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int		ret = NDI_FAILURE;
	int		circ = 0;
	int		circ1 = 0;
	mptsas_t	*mpt;
	char		*ptr = NULL;
	char		*devnm = NULL;
	uint64_t	wwid = 0;
	uint8_t		phy = 0xFF;
	int		lun = 0;
	uint_t		mflags = flag;
	int		bconfig = TRUE;

	if (scsi_hba_iport_unit_address(pdip) == 0) {
		/* Not an iport node — nothing to configure here. */
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				/* Append ",0" to form a canonical address. */
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				/* Address by SAS WWN. */
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				/* Address by direct-attached PHY number. */
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				/* Hold the child in place of bus_config. */
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	if ((ret == NDI_SUCCESS) && bconfig) {
		/* Use the canonicalized name if one was built above. */
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
13972
13973 static int
13974 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
13975 mptsas_target_t *ptgt)
13976 {
13977 int rval = DDI_FAILURE;
13978 struct scsi_inquiry *sd_inq = NULL;
13979 mptsas_t *mpt = DIP2MPT(pdip);
13980
13981 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13982
13983 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
13984 SUN_INQSIZE, 0, (uchar_t)0);
13985
13986 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13987 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
13988 } else {
13989 rval = DDI_FAILURE;
13990 }
13991
13992 kmem_free(sd_inq, SUN_INQSIZE);
13993 return (rval);
13994 }
13995
/*
 * Configure the LUN of the target with the given SAS address.  On
 * success *lundip points at the child devinfo node.  A phymask of 0
 * identifies the virtual iport, on which only RAID volumes live.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int		rval;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		/*
		 * NOTE(review): ptgt was looked up by sasaddr above, so
		 * this comparison appears to be always false; confirm the
		 * intended check (see the TODO above) before relying on it.
		 */
		if (sasaddr != ptgt->m_addr.mta_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
14056
14057 static int
14058 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
14059 dev_info_t **lundip)
14060 {
14061 int rval;
14062 mptsas_t *mpt = DIP2MPT(pdip);
14063 mptsas_phymask_t phymask;
14064 mptsas_target_t *ptgt = NULL;
14065
14066 /*
14067 * Get the physical port associated to the iport
14068 */
14069 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14070 "phymask", 0);
14071
14072 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
14073 if (ptgt == NULL) {
14074 /*
14075 * didn't match any device by searching
14076 */
14077 return (DDI_FAILURE);
14078 }
14079
14080 /*
14081 * If the LUN already exists and the status is online,
14082 * we just return the pointer to dev_info_t directly.
14083 * For the mdi_pathinfo node, we'll handle it in
14084 * mptsas_create_virt_lun().
14085 */
14086
14087 *lundip = mptsas_find_child_phy(pdip, phy);
14088 if (*lundip != NULL) {
14089 return (DDI_SUCCESS);
14090 }
14091
14092 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
14093
14094 return (rval);
14095 }
14096
14097 static int
14098 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
14099 uint8_t *lun_addr_type)
14100 {
14101 uint32_t lun_idx = 0;
14102
14103 ASSERT(lun_num != NULL);
14104 ASSERT(lun_addr_type != NULL);
14105
14106 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14107 /* determine report luns addressing type */
14108 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
14109 /*
14110 * Vendors in the field have been found to be concatenating
14111 * bus/target/lun to equal the complete lun value instead
14112 * of switching to flat space addressing
14113 */
14114 /* 00b - peripheral device addressing method */
14115 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
14116 /* FALLTHRU */
14117 /* 10b - logical unit addressing method */
14118 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
14119 /* FALLTHRU */
14120 /* 01b - flat space addressing method */
14121 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
14122 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
14123 *lun_addr_type = (buf[lun_idx] &
14124 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
14125 *lun_num = (buf[lun_idx] & 0x3F) << 8;
14126 *lun_num |= buf[lun_idx + 1];
14127 return (DDI_SUCCESS);
14128 default:
14129 return (DDI_FAILURE);
14130 }
14131 }
14132
14133 static int
14134 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
14135 {
14136 struct buf *repluns_bp = NULL;
14137 struct scsi_address ap;
14138 uchar_t cdb[CDB_GROUP5];
14139 int ret = DDI_FAILURE;
14140 int retry = 0;
14141 int lun_list_len = 0;
14142 uint16_t lun_num = 0;
14143 uint8_t lun_addr_type = 0;
14144 uint32_t lun_cnt = 0;
14145 uint32_t lun_total = 0;
14146 dev_info_t *cdip = NULL;
14147 uint16_t *saved_repluns = NULL;
14148 char *buffer = NULL;
14149 int buf_len = 128;
14150 mptsas_t *mpt = DIP2MPT(pdip);
14151 uint64_t sas_wwn = 0;
14152 uint8_t phy = 0xFF;
14153 uint32_t dev_info = 0;
14154
14155 mutex_enter(&mpt->m_mutex);
14156 sas_wwn = ptgt->m_addr.mta_wwn;
14157 phy = ptgt->m_phynum;
14158 dev_info = ptgt->m_deviceinfo;
14159 mutex_exit(&mpt->m_mutex);
14160
14161 if (sas_wwn == 0) {
14162 /*
14163 * It's a SATA without Device Name
14164 * So don't try multi-LUNs
14165 */
14166 if (mptsas_find_child_phy(pdip, phy)) {
14167 return (DDI_SUCCESS);
14168 } else {
14169 /*
14170 * need configure and create node
14171 */
14172 return (DDI_FAILURE);
14173 }
14174 }
14175
14176 /*
14177 * WWN (SAS address or Device Name exist)
14178 */
14179 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14180 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14181 /*
14182 * SATA device with Device Name
14183 * So don't try multi-LUNs
14184 */
14185 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
14186 return (DDI_SUCCESS);
14187 } else {
14188 return (DDI_FAILURE);
14189 }
14190 }
14191
14192 do {
14193 ap.a_target = MPTSAS_INVALID_DEVHDL;
14194 ap.a_lun = 0;
14195 ap.a_hba_tran = mpt->m_tran;
14196 repluns_bp = scsi_alloc_consistent_buf(&ap,
14197 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
14198 if (repluns_bp == NULL) {
14199 retry++;
14200 continue;
14201 }
14202 bzero(cdb, CDB_GROUP5);
14203 cdb[0] = SCMD_REPORT_LUNS;
14204 cdb[6] = (buf_len & 0xff000000) >> 24;
14205 cdb[7] = (buf_len & 0x00ff0000) >> 16;
14206 cdb[8] = (buf_len & 0x0000ff00) >> 8;
14207 cdb[9] = (buf_len & 0x000000ff);
14208
14209 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
14210 repluns_bp, NULL);
14211 if (ret != DDI_SUCCESS) {
14212 scsi_free_consistent_buf(repluns_bp);
14213 retry++;
14214 continue;
14215 }
14216 lun_list_len = BE_32(*(int *)((void *)(
14217 repluns_bp->b_un.b_addr)));
14218 if (buf_len >= lun_list_len + 8) {
14219 ret = DDI_SUCCESS;
14220 break;
14221 }
14222 scsi_free_consistent_buf(repluns_bp);
14223 buf_len = lun_list_len + 8;
14224
14225 } while (retry < 3);
14226
14227 if (ret != DDI_SUCCESS)
14228 return (ret);
14229 buffer = (char *)repluns_bp->b_un.b_addr;
14230 /*
14231 * find out the number of luns returned by the SCSI ReportLun call
14232 * and allocate buffer space
14233 */
14234 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14235 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
14236 if (saved_repluns == NULL) {
14237 scsi_free_consistent_buf(repluns_bp);
14238 return (DDI_FAILURE);
14239 }
14240 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
14241 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
14242 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
14243 continue;
14244 }
14245 saved_repluns[lun_cnt] = lun_num;
14246 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
14247 ret = DDI_SUCCESS;
14248 else
14249 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
14250 ptgt);
14251 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
14252 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
14253 MPTSAS_DEV_GONE);
14254 }
14255 }
14256 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
14257 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
14258 scsi_free_consistent_buf(repluns_bp);
14259 return (DDI_SUCCESS);
14260 }
14261
14262 static int
14263 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
14264 {
14265 int rval = DDI_FAILURE;
14266 struct scsi_inquiry *sd_inq = NULL;
14267 mptsas_t *mpt = DIP2MPT(pdip);
14268 mptsas_target_t *ptgt = NULL;
14269
14270 mutex_enter(&mpt->m_mutex);
14271 ptgt = refhash_linear_search(mpt->m_targets,
14272 mptsas_target_eval_devhdl, &target);
14273 mutex_exit(&mpt->m_mutex);
14274 if (ptgt == NULL) {
14275 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
14276 "not found.", target);
14277 return (rval);
14278 }
14279
14280 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14281 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
14282 SUN_INQSIZE, 0, (uchar_t)0);
14283
14284 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14285 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
14286 0);
14287 } else {
14288 rval = DDI_FAILURE;
14289 }
14290
14291 kmem_free(sd_inq, SUN_INQSIZE);
14292 return (rval);
14293 }
14294
14295 /*
14296 * configure all RAID volumes for virtual iport
14297 */
14298 static void
14299 mptsas_config_all_viport(dev_info_t *pdip)
14300 {
14301 mptsas_t *mpt = DIP2MPT(pdip);
14302 int config, vol;
14303 int target;
14304 dev_info_t *lundip = NULL;
14305
14306 /*
14307 * Get latest RAID info and search for any Volume DevHandles. If any
14308 * are found, configure the volume.
14309 */
14310 mutex_enter(&mpt->m_mutex);
14311 for (config = 0; config < mpt->m_num_raid_configs; config++) {
14312 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
14313 if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
14314 == 1) {
14315 target = mpt->m_raidconfig[config].
14316 m_raidvol[vol].m_raidhandle;
14317 mutex_exit(&mpt->m_mutex);
14318 (void) mptsas_config_raid(pdip, target,
14319 &lundip);
14320 mutex_enter(&mpt->m_mutex);
14321 }
14322 }
14323 }
14324 mutex_exit(&mpt->m_mutex);
14325 }
14326
/*
 * Offline every child devinfo node and mdi_pathinfo node of pdip that
 * belongs to ptgt (matched by WWN) but whose LUN no longer appears in
 * the repluns[] list of lun_cnt entries returned by REPORT LUNS.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t	*child = NULL, *savechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	uint64_t	sas_wwn, wwid;
	uint8_t		phy;
	int		lun;
	int		i;
	int		find;
	char		*addr;
	char		*nodename;
	mptsas_t	*mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_addr.mta_wwn;
	mutex_exit(&mpt->m_mutex);

	/* Pass 1: plain devinfo children. */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		/* Fetch the sibling first; savechild may be offlined below. */
		savechild = child;
		child = ddi_get_next_sibling(child);

		nodename = ddi_node_name(savechild);
		/* SMP nodes are not LUNs; skip them. */
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			/* Child belongs to a different target. */
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/* Pass 2: mdi_pathinfo (multipath) children. */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		/* Fetch the next path first; savepip may be offlined below. */
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
14423
/*
 * Repopulate the driver's SMP and target hash tables by walking the
 * IOC's expander and device pages with GetNextHandle, starting from
 * the handles saved in mpt->m_smp_devhdl / mpt->m_dev_handle.  The
 * traversal-done flags are set elsewhere when the IOC reports the end
 * of each list.
 */
void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t	page_address;
	int		rval = 0;
	uint16_t	dev_handle;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	/* Walk expander (SMP) pages until the traversal completes. */
	dev_handle = mpt->m_smp_devhdl;
	for (; mpt->m_done_traverse_smp == 0; ) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		/* Remember where to resume if we re-enter this loop. */
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(mpt, &smp_node);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Do loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	for (; mpt->m_done_traverse_dev == 0; ) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}

		/* Remember where to resume if we re-enter this loop. */
		mpt->m_dev_handle = dev_handle;
	}

}
14477
14478 void
14479 mptsas_update_driver_data(struct mptsas *mpt)
14480 {
14481 mptsas_target_t *tp;
14482 mptsas_smp_t *sp;
14483
14484 ASSERT(MUTEX_HELD(&mpt->m_mutex));
14485
14486 /*
14487 * TODO after hard reset, update the driver data structures
14488 * 1. update port/phymask mapping table mpt->m_phy_info
14489 * 2. invalid all the entries in hash table
14490 * m_devhdl = 0xffff and m_deviceinfo = 0
14491 * 3. call sas_device_page/expander_page to update hash table
14492 */
14493 mptsas_update_phymask(mpt);
14494
14495 /*
14496 * Remove all the devhdls for existing entries but leave their
14497 * addresses alone. In update_hashtab() below, we'll find all
14498 * targets that are still present and reassociate them with
14499 * their potentially new devhdls. Leaving the targets around in
14500 * this fashion allows them to be used on the tx waitq even
14501 * while IOC reset is occurring.
14502 */
14503 for (tp = refhash_first(mpt->m_targets); tp != NULL;
14504 tp = refhash_next(mpt->m_targets, tp)) {
14505 tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
14506 tp->m_deviceinfo = 0;
14507 tp->m_dr_flag = MPTSAS_DR_INACTIVE;
14508 }
14509 for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
14510 sp = refhash_next(mpt->m_smp_targets, sp)) {
14511 sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
14512 sp->m_deviceinfo = 0;
14513 }
14514 mpt->m_done_traverse_dev = 0;
14515 mpt->m_done_traverse_smp = 0;
14516 mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
14517 mptsas_update_hashtab(mpt);
14518 }
14519
/*
 * Configure every SMP expander and target that belongs to this iport
 * (matched by phymask).  The virtual iport (phymask == 0) carries only
 * RAID volumes and is handled by mptsas_config_all_viport().
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t	*smpdip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* Make sure the hash tables reflect the IOC's current view. */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/*
	 * Online every SMP expander on this physical port.  The mutex is
	 * dropped around the (blocking) online call and reacquired for
	 * the next hash-table step.
	 */
	for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
	    psmp = refhash_next(mpt->m_smp_targets, psmp)) {
		phy_mask = psmp->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
	}

	/* Configure every target on this physical port, same pattern. */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		phy_mask = ptgt->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}
	}
	mutex_exit(&mpt->m_mutex);
}
14572
14573 static int
14574 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
14575 {
14576 int rval = DDI_FAILURE;
14577 dev_info_t *tdip;
14578
14579 rval = mptsas_config_luns(pdip, ptgt);
14580 if (rval != DDI_SUCCESS) {
14581 /*
14582 * The return value means the SCMD_REPORT_LUNS
14583 * did not execute successfully. The target maybe
14584 * doesn't support such command.
14585 */
14586 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
14587 }
14588 return (rval);
14589 }
14590
14591 /*
14592 * Return fail if not all the childs/paths are freed.
14593 * if there is any path under the HBA, the return value will be always fail
14594 * because we didn't call mdi_pi_free for path
14595 */
14596 static int
14597 mptsas_offline_target(dev_info_t *pdip, char *name)
14598 {
14599 dev_info_t *child = NULL, *prechild = NULL;
14600 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
14601 int tmp_rval, rval = DDI_SUCCESS;
14602 char *addr, *cp;
14603 size_t s;
14604 mptsas_t *mpt = DIP2MPT(pdip);
14605
14606 child = ddi_get_child(pdip);
14607 while (child) {
14608 addr = ddi_get_name_addr(child);
14609 prechild = child;
14610 child = ddi_get_next_sibling(child);
14611
14612 if (addr == NULL) {
14613 continue;
14614 }
14615 if ((cp = strchr(addr, ',')) == NULL) {
14616 continue;
14617 }
14618
14619 s = (uintptr_t)cp - (uintptr_t)addr;
14620
14621 if (strncmp(addr, name, s) != 0) {
14622 continue;
14623 }
14624
14625 tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
14626 NDI_DEVI_REMOVE);
14627 if (tmp_rval != DDI_SUCCESS) {
14628 rval = DDI_FAILURE;
14629 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14630 prechild, MPTSAS_DEV_GONE) !=
14631 DDI_PROP_SUCCESS) {
14632 mptsas_log(mpt, CE_WARN, "mptsas driver "
14633 "unable to create property for "
14634 "SAS %s (MPTSAS_DEV_GONE)", addr);
14635 }
14636 }
14637 }
14638
14639 pip = mdi_get_next_client_path(pdip, NULL);
14640 while (pip) {
14641 addr = MDI_PI(pip)->pi_addr;
14642 savepip = pip;
14643 pip = mdi_get_next_client_path(pdip, pip);
14644 if (addr == NULL) {
14645 continue;
14646 }
14647
14648 if ((cp = strchr(addr, ',')) == NULL) {
14649 continue;
14650 }
14651
14652 s = (uintptr_t)cp - (uintptr_t)addr;
14653
14654 if (strncmp(addr, name, s) != 0) {
14655 continue;
14656 }
14657
14658 (void) mptsas_offline_lun(pdip, NULL, savepip,
14659 NDI_DEVI_REMOVE);
14660 /*
14661 * driver will not invoke mdi_pi_free, so path will not
14662 * be freed forever, return DDI_FAILURE.
14663 */
14664 rval = DDI_FAILURE;
14665 }
14666 return (rval);
14667 }
14668
14669 static int
14670 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
14671 mdi_pathinfo_t *rpip, uint_t flags)
14672 {
14673 int rval = DDI_FAILURE;
14674 char *devname;
14675 dev_info_t *cdip, *parent;
14676
14677 if (rpip != NULL) {
14678 parent = scsi_vhci_dip;
14679 cdip = mdi_pi_get_client(rpip);
14680 } else if (rdip != NULL) {
14681 parent = pdip;
14682 cdip = rdip;
14683 } else {
14684 return (DDI_FAILURE);
14685 }
14686
14687 /*
14688 * Make sure node is attached otherwise
14689 * it won't have related cache nodes to
14690 * clean up. i_ddi_devi_attached is
14691 * similiar to i_ddi_node_state(cdip) >=
14692 * DS_ATTACHED.
14693 */
14694 if (i_ddi_devi_attached(cdip)) {
14695
14696 /* Get full devname */
14697 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14698 (void) ddi_deviname(cdip, devname);
14699 /* Clean cache */
14700 (void) devfs_clean(parent, devname + 1,
14701 DV_CLEAN_FORCE);
14702 kmem_free(devname, MAXNAMELEN + 1);
14703 }
14704 if (rpip != NULL) {
14705 if (MDI_PI_IS_OFFLINE(rpip)) {
14706 rval = DDI_SUCCESS;
14707 } else {
14708 rval = mdi_pi_offline(rpip, 0);
14709 }
14710 } else {
14711 rval = ndi_devi_offline(cdip, flags);
14712 }
14713
14714 return (rval);
14715 }
14716
14717 static dev_info_t *
14718 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
14719 {
14720 dev_info_t *child = NULL;
14721 char *smp_wwn = NULL;
14722
14723 child = ddi_get_child(parent);
14724 while (child) {
14725 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
14726 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
14727 != DDI_SUCCESS) {
14728 child = ddi_get_next_sibling(child);
14729 continue;
14730 }
14731
14732 if (strcmp(smp_wwn, str_wwn) == 0) {
14733 ddi_prop_free(smp_wwn);
14734 break;
14735 }
14736 child = ddi_get_next_sibling(child);
14737 ddi_prop_free(smp_wwn);
14738 }
14739 return (child);
14740 }
14741
14742 static int
14743 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
14744 {
14745 int rval = DDI_FAILURE;
14746 char *devname;
14747 char wwn_str[MPTSAS_WWN_STRLEN];
14748 dev_info_t *cdip;
14749
14750 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
14751
14752 cdip = mptsas_find_smp_child(pdip, wwn_str);
14753
14754 if (cdip == NULL)
14755 return (DDI_SUCCESS);
14756
14757 /*
14758 * Make sure node is attached otherwise
14759 * it won't have related cache nodes to
14760 * clean up. i_ddi_devi_attached is
14761 * similiar to i_ddi_node_state(cdip) >=
14762 * DS_ATTACHED.
14763 */
14764 if (i_ddi_devi_attached(cdip)) {
14765
14766 /* Get full devname */
14767 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14768 (void) ddi_deviname(cdip, devname);
14769 /* Clean cache */
14770 (void) devfs_clean(pdip, devname + 1,
14771 DV_CLEAN_FORCE);
14772 kmem_free(devname, MAXNAMELEN + 1);
14773 }
14774
14775 rval = ndi_devi_offline(cdip, flags);
14776
14777 return (rval);
14778 }
14779
14780 static dev_info_t *
14781 mptsas_find_child(dev_info_t *pdip, char *name)
14782 {
14783 dev_info_t *child = NULL;
14784 char *rname = NULL;
14785 int rval = DDI_FAILURE;
14786
14787 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14788
14789 child = ddi_get_child(pdip);
14790 while (child) {
14791 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
14792 if (rval != DDI_SUCCESS) {
14793 child = ddi_get_next_sibling(child);
14794 bzero(rname, SCSI_MAXNAMELEN);
14795 continue;
14796 }
14797
14798 if (strcmp(rname, name) == 0) {
14799 break;
14800 }
14801 child = ddi_get_next_sibling(child);
14802 bzero(rname, SCSI_MAXNAMELEN);
14803 }
14804
14805 kmem_free(rname, SCSI_MAXNAMELEN);
14806
14807 return (child);
14808 }
14809
14810
14811 static dev_info_t *
14812 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
14813 {
14814 dev_info_t *child = NULL;
14815 char *name = NULL;
14816 char *addr = NULL;
14817
14818 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14819 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14820 (void) sprintf(name, "%016"PRIx64, sasaddr);
14821 (void) sprintf(addr, "w%s,%x", name, lun);
14822 child = mptsas_find_child(pdip, addr);
14823 kmem_free(name, SCSI_MAXNAMELEN);
14824 kmem_free(addr, SCSI_MAXNAMELEN);
14825 return (child);
14826 }
14827
14828 static dev_info_t *
14829 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
14830 {
14831 dev_info_t *child;
14832 char *addr;
14833
14834 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14835 (void) sprintf(addr, "p%x,0", phy);
14836 child = mptsas_find_child(pdip, addr);
14837 kmem_free(addr, SCSI_MAXNAMELEN);
14838 return (child);
14839 }
14840
14841 static mdi_pathinfo_t *
14842 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
14843 {
14844 mdi_pathinfo_t *path;
14845 char *addr = NULL;
14846
14847 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14848 (void) sprintf(addr, "p%x,0", phy);
14849 path = mdi_pi_find(pdip, NULL, addr);
14850 kmem_free(addr, SCSI_MAXNAMELEN);
14851 return (path);
14852 }
14853
14854 static mdi_pathinfo_t *
14855 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
14856 {
14857 mdi_pathinfo_t *path;
14858 char *name = NULL;
14859 char *addr = NULL;
14860
14861 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14862 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14863 (void) sprintf(name, "%016"PRIx64, sasaddr);
14864 (void) sprintf(addr, "w%s,%x", name, lun);
14865 path = mdi_pi_find(parent, NULL, addr);
14866 kmem_free(name, SCSI_MAXNAMELEN);
14867 kmem_free(addr, SCSI_MAXNAMELEN);
14868
14869 return (path);
14870 }
14871
/*
 * Enumerate one LUN under the given parent (iport) node.
 *
 * First try to build a devid/GUID for the LUN from INQUIRY VPD page 0x83;
 * with a GUID and MPXIO enabled the LUN is created as a scsi_vhci path
 * (mptsas_create_virt_lun()), otherwise it falls back to a plain physical
 * child node (mptsas_create_phys_lun()).
 *
 *	pdip	- parent iport devinfo node
 *	sd_inq	- standard INQUIRY data for the LUN (may be NULL)
 *	lun_dip	- set to the created child devinfo on success
 *	ptgt	- target the LUN belongs to
 *	lun	- LUN number
 *
 * Returns DDI_SUCCESS if a child node was created, DDI_FAILURE otherwise.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int		i = 0;
	uchar_t		*inq83 = NULL;
	int		inq83_len1 = 0xFF;	/* allocation length for page 0x83 */
	int		inq83_len = 0;		/* actual returned data length */
	int		rval = DDI_FAILURE;
	ddi_devid_t	devid;
	char		*guid = NULL;
	int		target = ptgt->m_devhdl;
	mdi_pathinfo_t	*pip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable escape hatch: optionally fall back to a
			 * physical (non-MPXIO) node when page 0x83 cannot
			 * be read at all, instead of failing enumeration.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			/* back off one second between retries */
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	/* Loop ran to completion: every attempt returned DDI_NOT_WELL_FORMED */
	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	rval = DDI_FAILURE;

create_lun:
	/* Prefer an MPXIO (scsi_vhci) path when we have a GUID */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	/* Fall back to (or directly create) a physical child node */
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
14995
14996 static int
14997 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
14998 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
14999 {
15000 int target;
15001 char *nodename = NULL;
15002 char **compatible = NULL;
15003 int ncompatible = 0;
15004 int mdi_rtn = MDI_FAILURE;
15005 int rval = DDI_FAILURE;
15006 char *old_guid = NULL;
15007 mptsas_t *mpt = DIP2MPT(pdip);
15008 char *lun_addr = NULL;
15009 char *wwn_str = NULL;
15010 char *attached_wwn_str = NULL;
15011 char *component = NULL;
15012 uint8_t phy = 0xFF;
15013 uint64_t sas_wwn;
15014 int64_t lun64 = 0;
15015 uint32_t devinfo;
15016 uint16_t dev_hdl;
15017 uint16_t pdev_hdl;
15018 uint64_t dev_sas_wwn;
15019 uint64_t pdev_sas_wwn;
15020 uint32_t pdev_info;
15021 uint8_t physport;
15022 uint8_t phy_id;
15023 uint32_t page_address;
15024 uint16_t bay_num, enclosure, io_flags;
15025 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
15026 uint32_t dev_info;
15027
15028 mutex_enter(&mpt->m_mutex);
15029 target = ptgt->m_devhdl;
15030 sas_wwn = ptgt->m_addr.mta_wwn;
15031 devinfo = ptgt->m_deviceinfo;
15032 phy = ptgt->m_phynum;
15033 mutex_exit(&mpt->m_mutex);
15034
15035 if (sas_wwn) {
15036 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
15037 } else {
15038 *pip = mptsas_find_path_phy(pdip, phy);
15039 }
15040
15041 if (*pip != NULL) {
15042 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15043 ASSERT(*lun_dip != NULL);
15044 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
15045 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
15046 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
15047 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
15048 /*
15049 * Same path back online again.
15050 */
15051 (void) ddi_prop_free(old_guid);
15052 if ((!MDI_PI_IS_ONLINE(*pip)) &&
15053 (!MDI_PI_IS_STANDBY(*pip)) &&
15054 (ptgt->m_tgt_unconfigured == 0)) {
15055 rval = mdi_pi_online(*pip, 0);
15056 mutex_enter(&mpt->m_mutex);
15057 ptgt->m_led_status = 0;
15058 (void) mptsas_flush_led_status(mpt,
15059 ptgt);
15060 mutex_exit(&mpt->m_mutex);
15061 } else {
15062 rval = DDI_SUCCESS;
15063 }
15064 if (rval != DDI_SUCCESS) {
15065 mptsas_log(mpt, CE_WARN, "path:target: "
15066 "%x, lun:%x online failed!", target,
15067 lun);
15068 *pip = NULL;
15069 *lun_dip = NULL;
15070 }
15071 return (rval);
15072 } else {
15073 /*
15074 * The GUID of the LUN has changed which maybe
15075 * because customer mapped another volume to the
15076 * same LUN.
15077 */
15078 mptsas_log(mpt, CE_WARN, "The GUID of the "
15079 "target:%x, lun:%x was changed, maybe "
15080 "because someone mapped another volume "
15081 "to the same LUN", target, lun);
15082 (void) ddi_prop_free(old_guid);
15083 if (!MDI_PI_IS_OFFLINE(*pip)) {
15084 rval = mdi_pi_offline(*pip, 0);
15085 if (rval != MDI_SUCCESS) {
15086 mptsas_log(mpt, CE_WARN, "path:"
15087 "target:%x, lun:%x offline "
15088 "failed!", target, lun);
15089 *pip = NULL;
15090 *lun_dip = NULL;
15091 return (DDI_FAILURE);
15092 }
15093 }
15094 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
15095 mptsas_log(mpt, CE_WARN, "path:target:"
15096 "%x, lun:%x free failed!", target,
15097 lun);
15098 *pip = NULL;
15099 *lun_dip = NULL;
15100 return (DDI_FAILURE);
15101 }
15102 }
15103 } else {
15104 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
15105 "property for path:target:%x, lun:%x", target, lun);
15106 *pip = NULL;
15107 *lun_dip = NULL;
15108 return (DDI_FAILURE);
15109 }
15110 }
15111 scsi_hba_nodename_compatible_get(inq, NULL,
15112 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
15113
15114 /*
15115 * if nodename can't be determined then print a message and skip it
15116 */
15117 if (nodename == NULL) {
15118 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
15119 "driver for target%d lun %d dtype:0x%02x", target, lun,
15120 inq->inq_dtype);
15121 return (DDI_FAILURE);
15122 }
15123
15124 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15125 /* The property is needed by MPAPI */
15126 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15127
15128 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15129 if (guid) {
15130 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
15131 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15132 } else {
15133 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
15134 (void) sprintf(wwn_str, "p%x", phy);
15135 }
15136
15137 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
15138 guid, lun_addr, compatible, ncompatible,
15139 0, pip);
15140 if (mdi_rtn == MDI_SUCCESS) {
15141
15142 if (mdi_prop_update_string(*pip, MDI_GUID,
15143 guid) != DDI_SUCCESS) {
15144 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15145 "create prop for target %d lun %d (MDI_GUID)",
15146 target, lun);
15147 mdi_rtn = MDI_FAILURE;
15148 goto virt_create_done;
15149 }
15150
15151 if (mdi_prop_update_int(*pip, LUN_PROP,
15152 lun) != DDI_SUCCESS) {
15153 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15154 "create prop for target %d lun %d (LUN_PROP)",
15155 target, lun);
15156 mdi_rtn = MDI_FAILURE;
15157 goto virt_create_done;
15158 }
15159 lun64 = (int64_t)lun;
15160 if (mdi_prop_update_int64(*pip, LUN64_PROP,
15161 lun64) != DDI_SUCCESS) {
15162 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15163 "create prop for target %d (LUN64_PROP)",
15164 target);
15165 mdi_rtn = MDI_FAILURE;
15166 goto virt_create_done;
15167 }
15168 if (mdi_prop_update_string_array(*pip, "compatible",
15169 compatible, ncompatible) !=
15170 DDI_PROP_SUCCESS) {
15171 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15172 "create prop for target %d lun %d (COMPATIBLE)",
15173 target, lun);
15174 mdi_rtn = MDI_FAILURE;
15175 goto virt_create_done;
15176 }
15177 if (sas_wwn && (mdi_prop_update_string(*pip,
15178 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
15179 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15180 "create prop for target %d lun %d "
15181 "(target-port)", target, lun);
15182 mdi_rtn = MDI_FAILURE;
15183 goto virt_create_done;
15184 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
15185 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
15186 /*
15187 * Direct attached SATA device without DeviceName
15188 */
15189 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15190 "create prop for SAS target %d lun %d "
15191 "(sata-phy)", target, lun);
15192 mdi_rtn = MDI_FAILURE;
15193 goto virt_create_done;
15194 }
15195 mutex_enter(&mpt->m_mutex);
15196
15197 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15198 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15199 (uint32_t)ptgt->m_devhdl;
15200 rval = mptsas_get_sas_device_page0(mpt, page_address,
15201 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
15202 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15203 if (rval != DDI_SUCCESS) {
15204 mutex_exit(&mpt->m_mutex);
15205 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15206 "parent device for handle %d", page_address);
15207 mdi_rtn = MDI_FAILURE;
15208 goto virt_create_done;
15209 }
15210
15211 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15212 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15213 rval = mptsas_get_sas_device_page0(mpt, page_address,
15214 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15215 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15216 if (rval != DDI_SUCCESS) {
15217 mutex_exit(&mpt->m_mutex);
15218 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15219 "device info for handle %d", page_address);
15220 mdi_rtn = MDI_FAILURE;
15221 goto virt_create_done;
15222 }
15223
15224 mutex_exit(&mpt->m_mutex);
15225
15226 /*
15227 * If this device direct attached to the controller
15228 * set the attached-port to the base wwid
15229 */
15230 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15231 != DEVINFO_DIRECT_ATTACHED) {
15232 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15233 pdev_sas_wwn);
15234 } else {
15235 /*
15236 * Update the iport's attached-port to guid
15237 */
15238 if (sas_wwn == 0) {
15239 (void) sprintf(wwn_str, "p%x", phy);
15240 } else {
15241 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15242 }
15243 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15244 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15245 DDI_PROP_SUCCESS) {
15246 mptsas_log(mpt, CE_WARN,
15247 "mptsas unable to create "
15248 "property for iport target-port"
15249 " %s (sas_wwn)",
15250 wwn_str);
15251 mdi_rtn = MDI_FAILURE;
15252 goto virt_create_done;
15253 }
15254
15255 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15256 mpt->un.m_base_wwid);
15257 }
15258
15259 if (mdi_prop_update_string(*pip,
15260 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15261 DDI_PROP_SUCCESS) {
15262 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15263 "property for iport attached-port %s (sas_wwn)",
15264 attached_wwn_str);
15265 mdi_rtn = MDI_FAILURE;
15266 goto virt_create_done;
15267 }
15268
15269
15270 if (inq->inq_dtype == 0) {
15271 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15272 /*
15273 * set obp path for pathinfo
15274 */
15275 (void) snprintf(component, MAXPATHLEN,
15276 "disk@%s", lun_addr);
15277
15278 if (mdi_pi_pathname_obp_set(*pip, component) !=
15279 DDI_SUCCESS) {
15280 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15281 "unable to set obp-path for object %s",
15282 component);
15283 mdi_rtn = MDI_FAILURE;
15284 goto virt_create_done;
15285 }
15286 }
15287
15288 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15289 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15290 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15291 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
15292 "pm-capable", 1)) !=
15293 DDI_PROP_SUCCESS) {
15294 mptsas_log(mpt, CE_WARN, "mptsas driver"
15295 "failed to create pm-capable "
15296 "property, target %d", target);
15297 mdi_rtn = MDI_FAILURE;
15298 goto virt_create_done;
15299 }
15300 }
15301 /*
15302 * Create the phy-num property
15303 */
15304 if (mdi_prop_update_int(*pip, "phy-num",
15305 ptgt->m_phynum) != DDI_SUCCESS) {
15306 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15307 "create phy-num property for target %d lun %d",
15308 target, lun);
15309 mdi_rtn = MDI_FAILURE;
15310 goto virt_create_done;
15311 }
15312 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
15313 mdi_rtn = mdi_pi_online(*pip, 0);
15314 if (mdi_rtn == MDI_SUCCESS) {
15315 mutex_enter(&mpt->m_mutex);
15316 ptgt->m_led_status = 0;
15317 (void) mptsas_flush_led_status(mpt, ptgt);
15318 mutex_exit(&mpt->m_mutex);
15319 }
15320 if (mdi_rtn == MDI_NOT_SUPPORTED) {
15321 mdi_rtn = MDI_FAILURE;
15322 }
15323 virt_create_done:
15324 if (*pip && mdi_rtn != MDI_SUCCESS) {
15325 (void) mdi_pi_free(*pip, 0);
15326 *pip = NULL;
15327 *lun_dip = NULL;
15328 }
15329 }
15330
15331 scsi_hba_nodename_compatible_free(nodename, compatible);
15332 if (lun_addr != NULL) {
15333 kmem_free(lun_addr, SCSI_MAXNAMELEN);
15334 }
15335 if (wwn_str != NULL) {
15336 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15337 }
15338 if (component != NULL) {
15339 kmem_free(component, MAXPATHLEN);
15340 }
15341
15342 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15343 }
15344
15345 static int
15346 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
15347 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
15348 {
15349 int target;
15350 int rval;
15351 int ndi_rtn = NDI_FAILURE;
15352 uint64_t be_sas_wwn;
15353 char *nodename = NULL;
15354 char **compatible = NULL;
15355 int ncompatible = 0;
15356 int instance = 0;
15357 mptsas_t *mpt = DIP2MPT(pdip);
15358 char *wwn_str = NULL;
15359 char *component = NULL;
15360 char *attached_wwn_str = NULL;
15361 uint8_t phy = 0xFF;
15362 uint64_t sas_wwn;
15363 uint32_t devinfo;
15364 uint16_t dev_hdl;
15365 uint16_t pdev_hdl;
15366 uint64_t pdev_sas_wwn;
15367 uint64_t dev_sas_wwn;
15368 uint32_t pdev_info;
15369 uint8_t physport;
15370 uint8_t phy_id;
15371 uint32_t page_address;
15372 uint16_t bay_num, enclosure, io_flags;
15373 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
15374 uint32_t dev_info;
15375 int64_t lun64 = 0;
15376
15377 mutex_enter(&mpt->m_mutex);
15378 target = ptgt->m_devhdl;
15379 sas_wwn = ptgt->m_addr.mta_wwn;
15380 devinfo = ptgt->m_deviceinfo;
15381 phy = ptgt->m_phynum;
15382 mutex_exit(&mpt->m_mutex);
15383
15384 /*
15385 * generate compatible property with binding-set "mpt"
15386 */
15387 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
15388 &nodename, &compatible, &ncompatible);
15389
15390 /*
15391 * if nodename can't be determined then print a message and skip it
15392 */
15393 if (nodename == NULL) {
15394 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
15395 "for target %d lun %d", target, lun);
15396 return (DDI_FAILURE);
15397 }
15398
15399 ndi_rtn = ndi_devi_alloc(pdip, nodename,
15400 DEVI_SID_NODEID, lun_dip);
15401
15402 /*
15403 * if lun alloc success, set props
15404 */
15405 if (ndi_rtn == NDI_SUCCESS) {
15406
15407 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15408 *lun_dip, LUN_PROP, lun) !=
15409 DDI_PROP_SUCCESS) {
15410 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15411 "property for target %d lun %d (LUN_PROP)",
15412 target, lun);
15413 ndi_rtn = NDI_FAILURE;
15414 goto phys_create_done;
15415 }
15416
15417 lun64 = (int64_t)lun;
15418 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
15419 *lun_dip, LUN64_PROP, lun64) !=
15420 DDI_PROP_SUCCESS) {
15421 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15422 "property for target %d lun64 %d (LUN64_PROP)",
15423 target, lun);
15424 ndi_rtn = NDI_FAILURE;
15425 goto phys_create_done;
15426 }
15427 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
15428 *lun_dip, "compatible", compatible, ncompatible)
15429 != DDI_PROP_SUCCESS) {
15430 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15431 "property for target %d lun %d (COMPATIBLE)",
15432 target, lun);
15433 ndi_rtn = NDI_FAILURE;
15434 goto phys_create_done;
15435 }
15436
15437 /*
15438 * We need the SAS WWN for non-multipath devices, so
15439 * we'll use the same property as that multipathing
15440 * devices need to present for MPAPI. If we don't have
15441 * a WWN (e.g. parallel SCSI), don't create the prop.
15442 */
15443 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15444 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15445 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
15446 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
15447 != DDI_PROP_SUCCESS) {
15448 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15449 "create property for SAS target %d lun %d "
15450 "(target-port)", target, lun);
15451 ndi_rtn = NDI_FAILURE;
15452 goto phys_create_done;
15453 }
15454
15455 be_sas_wwn = BE_64(sas_wwn);
15456 if (sas_wwn && ndi_prop_update_byte_array(
15457 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
15458 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
15459 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15460 "create property for SAS target %d lun %d "
15461 "(port-wwn)", target, lun);
15462 ndi_rtn = NDI_FAILURE;
15463 goto phys_create_done;
15464 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
15465 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
15466 DDI_PROP_SUCCESS)) {
15467 /*
15468 * Direct attached SATA device without DeviceName
15469 */
15470 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15471 "create property for SAS target %d lun %d "
15472 "(sata-phy)", target, lun);
15473 ndi_rtn = NDI_FAILURE;
15474 goto phys_create_done;
15475 }
15476
15477 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15478 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
15479 mptsas_log(mpt, CE_WARN, "mptsas unable to"
15480 "create property for SAS target %d lun %d"
15481 " (SAS_PROP)", target, lun);
15482 ndi_rtn = NDI_FAILURE;
15483 goto phys_create_done;
15484 }
15485 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
15486 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
15487 mptsas_log(mpt, CE_WARN, "mptsas unable "
15488 "to create guid property for target %d "
15489 "lun %d", target, lun);
15490 ndi_rtn = NDI_FAILURE;
15491 goto phys_create_done;
15492 }
15493
15494 /*
15495 * The following code is to set properties for SM-HBA support,
15496 * it doesn't apply to RAID volumes
15497 */
15498 if (ptgt->m_addr.mta_phymask == 0)
15499 goto phys_raid_lun;
15500
15501 mutex_enter(&mpt->m_mutex);
15502
15503 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15504 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15505 (uint32_t)ptgt->m_devhdl;
15506 rval = mptsas_get_sas_device_page0(mpt, page_address,
15507 &dev_hdl, &dev_sas_wwn, &dev_info,
15508 &physport, &phy_id, &pdev_hdl,
15509 &bay_num, &enclosure, &io_flags);
15510 if (rval != DDI_SUCCESS) {
15511 mutex_exit(&mpt->m_mutex);
15512 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15513 "parent device for handle %d.", page_address);
15514 ndi_rtn = NDI_FAILURE;
15515 goto phys_create_done;
15516 }
15517
15518 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15519 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15520 rval = mptsas_get_sas_device_page0(mpt, page_address,
15521 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15522 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15523 if (rval != DDI_SUCCESS) {
15524 mutex_exit(&mpt->m_mutex);
15525 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15526 "device for handle %d.", page_address);
15527 ndi_rtn = NDI_FAILURE;
15528 goto phys_create_done;
15529 }
15530
15531 mutex_exit(&mpt->m_mutex);
15532
15533 /*
15534 * If this device direct attached to the controller
15535 * set the attached-port to the base wwid
15536 */
15537 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15538 != DEVINFO_DIRECT_ATTACHED) {
15539 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15540 pdev_sas_wwn);
15541 } else {
15542 /*
15543 * Update the iport's attached-port to guid
15544 */
15545 if (sas_wwn == 0) {
15546 (void) sprintf(wwn_str, "p%x", phy);
15547 } else {
15548 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15549 }
15550 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15551 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15552 DDI_PROP_SUCCESS) {
15553 mptsas_log(mpt, CE_WARN,
15554 "mptsas unable to create "
15555 "property for iport target-port"
15556 " %s (sas_wwn)",
15557 wwn_str);
15558 ndi_rtn = NDI_FAILURE;
15559 goto phys_create_done;
15560 }
15561
15562 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15563 mpt->un.m_base_wwid);
15564 }
15565
15566 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15567 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15568 DDI_PROP_SUCCESS) {
15569 mptsas_log(mpt, CE_WARN,
15570 "mptsas unable to create "
15571 "property for iport attached-port %s (sas_wwn)",
15572 attached_wwn_str);
15573 ndi_rtn = NDI_FAILURE;
15574 goto phys_create_done;
15575 }
15576
15577 if (IS_SATA_DEVICE(dev_info)) {
15578 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15579 *lun_dip, MPTSAS_VARIANT, "sata") !=
15580 DDI_PROP_SUCCESS) {
15581 mptsas_log(mpt, CE_WARN,
15582 "mptsas unable to create "
15583 "property for device variant ");
15584 ndi_rtn = NDI_FAILURE;
15585 goto phys_create_done;
15586 }
15587 }
15588
15589 if (IS_ATAPI_DEVICE(dev_info)) {
15590 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15591 *lun_dip, MPTSAS_VARIANT, "atapi") !=
15592 DDI_PROP_SUCCESS) {
15593 mptsas_log(mpt, CE_WARN,
15594 "mptsas unable to create "
15595 "property for device variant ");
15596 ndi_rtn = NDI_FAILURE;
15597 goto phys_create_done;
15598 }
15599 }
15600
15601 phys_raid_lun:
15602 /*
15603 * if this is a SAS controller, and the target is a SATA
15604 * drive, set the 'pm-capable' property for sd and if on
15605 * an OPL platform, also check if this is an ATAPI
15606 * device.
15607 */
15608 instance = ddi_get_instance(mpt->m_dip);
15609 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15610 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15611 NDBG2(("mptsas%d: creating pm-capable property, "
15612 "target %d", instance, target));
15613
15614 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
15615 *lun_dip, "pm-capable", 1)) !=
15616 DDI_PROP_SUCCESS) {
15617 mptsas_log(mpt, CE_WARN, "mptsas "
15618 "failed to create pm-capable "
15619 "property, target %d", target);
15620 ndi_rtn = NDI_FAILURE;
15621 goto phys_create_done;
15622 }
15623
15624 }
15625
15626 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
15627 /*
15628 * add 'obp-path' properties for devinfo
15629 */
15630 bzero(wwn_str, sizeof (wwn_str));
15631 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15632 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15633 if (guid) {
15634 (void) snprintf(component, MAXPATHLEN,
15635 "disk@w%s,%x", wwn_str, lun);
15636 } else {
15637 (void) snprintf(component, MAXPATHLEN,
15638 "disk@p%x,%x", phy, lun);
15639 }
15640 if (ddi_pathname_obp_set(*lun_dip, component)
15641 != DDI_SUCCESS) {
15642 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15643 "unable to set obp-path for SAS "
15644 "object %s", component);
15645 ndi_rtn = NDI_FAILURE;
15646 goto phys_create_done;
15647 }
15648 }
15649 /*
15650 * Create the phy-num property for non-raid disk
15651 */
15652 if (ptgt->m_addr.mta_phymask != 0) {
15653 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15654 *lun_dip, "phy-num", ptgt->m_phynum) !=
15655 DDI_PROP_SUCCESS) {
15656 mptsas_log(mpt, CE_WARN, "mptsas driver "
15657 "failed to create phy-num property for "
15658 "target %d", target);
15659 ndi_rtn = NDI_FAILURE;
15660 goto phys_create_done;
15661 }
15662 }
15663 phys_create_done:
15664 /*
15665 * If props were setup ok, online the lun
15666 */
15667 if (ndi_rtn == NDI_SUCCESS) {
15668 /*
15669 * Try to online the new node
15670 */
15671 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
15672 }
15673 if (ndi_rtn == NDI_SUCCESS) {
15674 mutex_enter(&mpt->m_mutex);
15675 ptgt->m_led_status = 0;
15676 (void) mptsas_flush_led_status(mpt, ptgt);
15677 mutex_exit(&mpt->m_mutex);
15678 }
15679
15680 /*
15681 * If success set rtn flag, else unwire alloc'd lun
15682 */
15683 if (ndi_rtn != NDI_SUCCESS) {
15684 NDBG12(("mptsas driver unable to online "
15685 "target %d lun %d", target, lun));
15686 ndi_prop_remove_all(*lun_dip);
15687 (void) ndi_devi_free(*lun_dip);
15688 *lun_dip = NULL;
15689 }
15690 }
15691
15692 scsi_hba_nodename_compatible_free(nodename, compatible);
15693
15694 if (wwn_str != NULL) {
15695 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15696 }
15697 if (component != NULL) {
15698 kmem_free(component, MAXPATHLEN);
15699 }
15700
15701
15702 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15703 }
15704
15705 static int
15706 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
15707 {
15708 mptsas_t *mpt = DIP2MPT(pdip);
15709 struct smp_device smp_sd;
15710
15711 /* XXX An HBA driver should not be allocating an smp_device. */
15712 bzero(&smp_sd, sizeof (struct smp_device));
15713 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
15714 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
15715
15716 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
15717 return (NDI_FAILURE);
15718 return (NDI_SUCCESS);
15719 }
15720
15721 static int
15722 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
15723 {
15724 mptsas_t *mpt = DIP2MPT(pdip);
15725 mptsas_smp_t *psmp = NULL;
15726 int rval;
15727 int phymask;
15728
15729 /*
15730 * Get the physical port associated to the iport
15731 * PHYMASK TODO
15732 */
15733 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
15734 "phymask", 0);
15735 /*
15736 * Find the smp node in hash table with specified sas address and
15737 * physical port
15738 */
15739 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
15740 if (psmp == NULL) {
15741 return (DDI_FAILURE);
15742 }
15743
15744 rval = mptsas_online_smp(pdip, psmp, smp_dip);
15745
15746 return (rval);
15747 }
15748
/*
 * Create and online a devinfo node for an SMP (expander) device.
 *
 * The expander is first probed to make sure it is still present; if a
 * child node for its WWN already exists, success is returned immediately.
 * Otherwise a new "smp" flavored child is allocated, its SMP/SM-HBA
 * properties (smp-wwn, target-port, attached-port) are populated from the
 * expander and device config pages, and - for expanders direct attached
 * to the HBA on an iport whose num-phys props were never set up - the
 * iport's SM-HBA PHY properties are (re)built as well.  Finally the node
 * is onlined.
 *
 *	pdip	 - parent iport devinfo node
 *	smp_node - driver's record of the expander
 *	smp_dip	 - set to the created child devinfo on success
 *
 * Returns DDI_SUCCESS when the node is online, DDI_FAILURE otherwise.
 */
static int
mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
    dev_info_t **smp_dip)
{
	char		wwn_str[MPTSAS_WWN_STRLEN];
	char		attached_wwn_str[MPTSAS_WWN_STRLEN];
	int		ndi_rtn = NDI_FAILURE;
	int		rval = 0;
	mptsas_smp_t	dev_info;
	uint32_t	page_address;
	mptsas_t	*mpt = DIP2MPT(pdip);
	uint16_t	dev_hdl;
	uint64_t	sas_wwn;
	uint64_t	smp_sas_wwn;
	uint8_t		physport;
	uint8_t		phy_id;
	uint16_t	pdev_hdl;
	uint8_t		numphys = 0;
	uint16_t	i = 0;
	char		phymask[MPTSAS_MAX_PHYS];
	char		*iport = NULL;
	mptsas_phymask_t	phy_mask = 0;
	uint16_t	attached_devhdl;
	uint16_t	bay_num, enclosure, io_flags;

	/*
	 * Unit-address form used by mptsas_find_smp_child() and the
	 * smp-wwn property (no "w" prefix, no zero padding here).
	 */
	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);

	/*
	 * Probe smp device, prevent the node of removed device from being
	 * configured succesfully
	 */
	if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Already enumerated: nothing more to do */
	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
		return (DDI_SUCCESS);
	}

	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {
		/*
		 * Set the flavor of the child to be SMP flavored
		 */
		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SMP_WWN, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp device %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		/* target-port uses the "w"-prefixed WWN form */
		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for iport target-port %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);

		/* Expander page 0 gives us the parent device handle */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &dev_info);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to get expander "
			    "parent device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Device page 0 of the parent: its WWN and device info */
		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_pdevhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo, &physport,
		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Device page 0 of the expander itself: its device info */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure,
		    &io_flags);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		/*
		 * If this smp direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    sas_wwn);
		} else {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SMP %s (SMP_PROP) ",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/*
		 * check the smp to see whether it direct
		 * attached to the controller
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			goto smp_create_done;
		}
		/* iport already has num-phys: its props are current */
		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
		if (numphys > 0) {
			goto smp_create_done;
		}
		/*
		 * this iport is an old iport, we need to
		 * reconfig the props for it.
		 */
		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_VIRTUAL_PORT, 0) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			goto smp_create_done;
		}

		/*
		 * Recover this iport's phy mask by matching its unit
		 * address (a hex phy-mask string) against the per-phy
		 * masks recorded in m_phy_info[].
		 */
		mutex_enter(&mpt->m_mutex);
		numphys = 0;
		iport = ddi_get_name_addr(pdip);
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			bzero(phymask, sizeof (phymask));
			(void) sprintf(phymask,
			    "%x", mpt->m_phy_info[i].phy_mask);
			if (strcmp(phymask, iport) == 0) {
				phy_mask = mpt->m_phy_info[i].phy_mask;
				break;
			}
		}

		/* Count the phys belonging to this iport (popcount) */
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			if ((phy_mask >> i) & 0x01) {
				numphys++;
			}
		}
		/*
		 * Update PHY info for smhba
		 */
		if (mptsas_smhba_phy_init(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas phy update "
			    "failed");
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
		    &attached_devhdl);

		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_NUM_PHYS, numphys) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_NUM_PHYS);
			mptsas_log(mpt, CE_WARN, "mptsas update "
			    "num phys props failed");
			goto smp_create_done;
		}
		/*
		 * Add parent's props for SMHBA support
		 */
		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    SCSI_ADDR_PROP_ATTACHED_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas update iport"
			    "attached-port failed");
			goto smp_create_done;
		}

smp_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas unable to online "
			    "SMP target %s", wwn_str));
			ndi_prop_remove_all(*smp_dip);
			/*
			 * NOTE(review): unlike mptsas_create_phys_lun(),
			 * *smp_dip is not reset to NULL after the free, so
			 * callers see a dangling pointer on DDI_FAILURE -
			 * confirm no caller examines *smp_dip on failure.
			 */
			(void) ndi_devi_free(*smp_dip);
		}
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
16002
/*
 * smp transport routine: build an MPI2 SMP passthrough request from the
 * smp_pkt and execute it synchronously via mptsas_do_passthru().
 *
 * Returns DDI_SUCCESS on success; on failure returns DDI_FAILURE with
 * smp_pkt->smp_pkt_reason set to an errno-style code.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t wwn;
	Mpi2SmpPassthroughRequest_t req;
	Mpi2SmpPassthroughReply_t rep;
	uint32_t direction = 0;
	mptsas_t *mpt;
	int ret;
	uint64_t tmp64;

	/* Recover the driver soft state from the SMP address' transport. */
	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is only 16 bits wide; reject larger requests. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * NOTE(review): the -4 adjustment here and in the sizes passed to
	 * mptsas_do_passthru() below presumably accounts for the 4-byte SMP
	 * CRC handled by the IOC -- TODO confirm against the MPI2 spec.
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	/* Set transfer direction flags based on which buffers are present. */
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* Map the IOC status onto an errno for the SMP framework. */
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	/* IOC accepted the request; the SAS-level status must also be OK. */
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
16087
16088 /*
16089 * If we didn't get a match, we need to get sas page0 for each device, and
16090 * untill we get a match. If failed, return NULL
16091 */
static mptsas_target_t *
mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
{
	int i, j = 0;
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *ptgt = NULL;

	/*
	 * PHY named device must be direct attached and attaches to
	 * narrow port, if the iport is not parent of the device which
	 * we are looking for.
	 */
	/* Count how many phys are set in this iport's phymask. */
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		if ((1 << i) & phymask)
			j++;
	}

	/* More than one phy means a wide port; no PHY-named device there. */
	if (j > 1)
		return (NULL);

	/*
	 * Must be a narrow port and single device attached to the narrow port
	 * So the physical port num of device which is equal to the iport's
	 * port num is the device what we are looking for.
	 */

	if (mpt->m_phy_info[phy].phy_mask != phymask)
		return (NULL);

	mutex_enter(&mpt->m_mutex);

	/* Fast path: search cached targets for this phy (WWN-less match). */
	ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
	    &phy);
	if (ptgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (ptgt);
	}

	/* Full device list already walked; the target does not exist. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	/* Walk SAS device page 0 starting from the last saved handle. */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		ptgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			/* Page 0 fetch or allocation failed; stop scanning. */
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			/* Not a candidate device type; keep scanning. */
			continue;
		}
		/* Remember how far we got for the next lookup. */
		mpt->m_dev_handle = cur_handle;

		/* Direct-attached devices have no WWN; match on phy number. */
		if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (ptgt);
}
16163
16164 /*
16165 * The ptgt->m_addr.mta_wwn contains the wwid for each disk.
16166 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
16167 * If we didn't get a match, we need to get sas page0 for each device, and
16168 * untill we get a match
16169 * If failed, return NULL
16170 */
static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *tmp_tgt = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	/* Fast path: the target is already in the hash. */
	tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
	if (tmp_tgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	if (phymask == 0) {
		/*
		 * It's IR volume
		 */
		/* Refresh RAID volume info, then retry the lookup once. */
		rval = mptsas_get_raid_info(mpt);
		if (rval) {
			tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
		}
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	/* Full device list already walked; the target does not exist. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	/* Walk SAS device page 0 starting from the last saved handle. */
	cur_handle = mpt->m_dev_handle;
	for (;;) {
		tmp_tgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &tmp_tgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			/* Page 0 fetch or allocation failed; stop scanning. */
			tmp_tgt = NULL;
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			/* Not a candidate device type; keep scanning. */
			continue;
		}
		/* Remember how far we got for the next lookup. */
		mpt->m_dev_handle = cur_handle;
		/* Match requires a non-zero WWN plus the iport's phymask. */
		if ((tmp_tgt->m_addr.mta_wwn) &&
		    (tmp_tgt->m_addr.mta_wwn == wwid) &&
		    (tmp_tgt->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (tmp_tgt);
}
16234
/*
 * Look up the SMP target (expander) identified by (wwid, phymask).
 * Checks the SMP target hash first; if not found and the expander list has
 * not been fully traversed, walks SAS expander page 0, caching each
 * discovered expander, until a match is found. Returns NULL on failure.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_smp_t smp_node, *psmp = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	/* Fast path: the SMP target is already in the hash. */
	psmp = refhash_lookup(mpt->m_smp_targets, &addr);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* Full expander list already walked; the target does not exist. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	/* Walk SAS expander page 0 starting from the last saved handle. */
	cur_handle = mpt->m_smp_devhdl;
	for (;;) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			/* Page 0 fetch failed; give up (psmp is NULL). */
			break;
		}
		/* Advance the saved handle and cache this expander. */
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		psmp = mptsas_smp_alloc(mpt, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
		    (psmp->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
16281
16282 mptsas_target_t *
16283 mptsas_tgt_alloc(mptsas_t *mpt, uint16_t devhdl, uint64_t wwid,
16284 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
16285 {
16286 mptsas_target_t *tmp_tgt = NULL;
16287 mptsas_target_addr_t addr;
16288
16289 addr.mta_wwn = wwid;
16290 addr.mta_phymask = phymask;
16291 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16292 if (tmp_tgt != NULL) {
16293 NDBG20(("Hash item already exist"));
16294 tmp_tgt->m_deviceinfo = devinfo;
16295 tmp_tgt->m_devhdl = devhdl; /* XXX - duplicate? */
16296 return (tmp_tgt);
16297 }
16298 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
16299 if (tmp_tgt == NULL) {
16300 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
16301 return (NULL);
16302 }
16303 tmp_tgt->m_devhdl = devhdl;
16304 tmp_tgt->m_addr.mta_wwn = wwid;
16305 tmp_tgt->m_deviceinfo = devinfo;
16306 tmp_tgt->m_addr.mta_phymask = phymask;
16307 tmp_tgt->m_phynum = phynum;
16308 /* Initialized the tgt structure */
16309 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
16310 tmp_tgt->m_qfull_retry_interval =
16311 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
16312 tmp_tgt->m_t_throttle = MAX_THROTTLE;
16313 TAILQ_INIT(&tmp_tgt->m_active_cmdq);
16314
16315 refhash_insert(mpt->m_targets, tmp_tgt);
16316
16317 return (tmp_tgt);
16318 }
16319
16320 static void
16321 mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
16322 {
16323 dst->m_devhdl = src->m_devhdl;
16324 dst->m_deviceinfo = src->m_deviceinfo;
16325 dst->m_pdevhdl = src->m_pdevhdl;
16326 dst->m_pdevinfo = src->m_pdevinfo;
16327 }
16328
16329 static mptsas_smp_t *
16330 mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
16331 {
16332 mptsas_target_addr_t addr;
16333 mptsas_smp_t *ret_data;
16334
16335 addr.mta_wwn = data->m_addr.mta_wwn;
16336 addr.mta_phymask = data->m_addr.mta_phymask;
16337 ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
16338 /*
16339 * If there's already a matching SMP target, update its fields
16340 * in place. Since the address is not changing, it's safe to do
16341 * this. We cannot just bcopy() here because the structure we've
16342 * been given has invalid hash links.
16343 */
16344 if (ret_data != NULL) {
16345 mptsas_smp_target_copy(data, ret_data);
16346 return (ret_data);
16347 }
16348
16349 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
16350 bcopy(data, ret_data, sizeof (mptsas_smp_t));
16351 refhash_insert(mpt->m_smp_targets, ret_data);
16352 return (ret_data);
16353 }
16354
16355 /*
16356 * Functions for SGPIO LED support
16357 */
16358 static dev_info_t *
16359 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
16360 {
16361 dev_info_t *dip;
16362 int prop;
16363 dip = e_ddi_hold_devi_by_dev(dev, 0);
16364 if (dip == NULL)
16365 return (dip);
16366 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
16367 "phymask", 0);
16368 *phymask = (mptsas_phymask_t)prop;
16369 ddi_release_devi(dip);
16370 return (dip);
16371 }
16372 static mptsas_target_t *
16373 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16374 {
16375 uint8_t phynum;
16376 uint64_t wwn;
16377 int lun;
16378 mptsas_target_t *ptgt = NULL;
16379
16380 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16381 return (NULL);
16382 }
16383 if (addr[0] == 'w') {
16384 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16385 } else {
16386 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16387 }
16388 return (ptgt);
16389 }
16390
16391 static int
16392 mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt)
16393 {
16394 uint32_t slotstatus = 0;
16395
16396 /* Build an MPI2 Slot Status based on our view of the world */
16397 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16398 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16399 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16400 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16401 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16402 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
16403
16404 /* Write it to the controller */
16405 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16406 slotstatus, ptgt->m_slot_num));
16407 return (mptsas_send_sep(mpt, ptgt, &slotstatus,
16408 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16409 }
16410
16411 /*
16412 * send sep request, use enclosure/slot addressing
16413 */
16414 static int
16415 mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
16416 uint32_t *status, uint8_t act)
16417 {
16418 Mpi2SepRequest_t req;
16419 Mpi2SepReply_t rep;
16420 int ret;
16421
16422 ASSERT(mutex_owned(&mpt->m_mutex));
16423
16424 /*
16425 * We only support SEP control of directly-attached targets, in which
16426 * case the "SEP" we're talking to is a virtual one contained within
16427 * the HBA itself. This is necessary because DA targets typically have
16428 * no other mechanism for LED control. Targets for which a separate
16429 * enclosure service processor exists should be controlled via ses(7d)
16430 * or sgen(7d). Furthermore, since such requests can time out, they
16431 * should be made in user context rather than in response to
16432 * asynchronous fabric changes.
16433 *
16434 * In addition, we do not support this operation for RAID volumes,
16435 * since there is no slot associated with them.
16436 */
16437 if (!(ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED) ||
16438 ptgt->m_addr.mta_phymask == 0) {
16439 return (ENOTTY);
16440 }
16441
16442 bzero(&req, sizeof (req));
16443 bzero(&rep, sizeof (rep));
16444
16445 req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
16446 req.Action = act;
16447 req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
16448 req.EnclosureHandle = LE_16(ptgt->m_enclosure);
16449 req.Slot = LE_16(ptgt->m_slot_num);
16450 if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16451 req.SlotStatus = LE_32(*status);
16452 }
16453 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
16454 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
16455 if (ret != 0) {
16456 mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
16457 "Processor Request message error %d", ret);
16458 return (ret);
16459 }
16460 /* do passthrough success, check the ioc status */
16461 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
16462 mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
16463 "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
16464 LE_32(rep.IOCLogInfo));
16465 switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
16466 case MPI2_IOCSTATUS_INVALID_FUNCTION:
16467 case MPI2_IOCSTATUS_INVALID_VPID:
16468 case MPI2_IOCSTATUS_INVALID_FIELD:
16469 case MPI2_IOCSTATUS_INVALID_STATE:
16470 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
16471 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
16472 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
16473 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
16474 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
16475 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
16476 return (EINVAL);
16477 case MPI2_IOCSTATUS_BUSY:
16478 return (EBUSY);
16479 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
16480 return (EAGAIN);
16481 case MPI2_IOCSTATUS_INVALID_SGL:
16482 case MPI2_IOCSTATUS_INTERNAL_ERROR:
16483 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
16484 default:
16485 return (EIO);
16486 }
16487 }
16488 if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16489 *status = LE_32(rep.SlotStatus);
16490 }
16491
16492 return (0);
16493 }
16494
16495 int
16496 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
16497 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
16498 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
16499 {
16500 ddi_dma_cookie_t new_cookie;
16501 size_t alloc_len;
16502 uint_t ncookie;
16503
16504 if (cookiep == NULL)
16505 cookiep = &new_cookie;
16506
16507 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
16508 NULL, dma_hdp) != DDI_SUCCESS) {
16509 return (FALSE);
16510 }
16511
16512 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
16513 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
16514 acc_hdp) != DDI_SUCCESS) {
16515 ddi_dma_free_handle(dma_hdp);
16516 *dma_hdp = NULL;
16517 return (FALSE);
16518 }
16519
16520 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
16521 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
16522 cookiep, &ncookie) != DDI_DMA_MAPPED) {
16523 (void) ddi_dma_mem_free(acc_hdp);
16524 ddi_dma_free_handle(dma_hdp);
16525 *dma_hdp = NULL;
16526 return (FALSE);
16527 }
16528
16529 return (TRUE);
16530 }
16531
16532 void
16533 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
16534 {
16535 if (*dma_hdp == NULL)
16536 return;
16537
16538 (void) ddi_dma_unbind_handle(*dma_hdp);
16539 (void) ddi_dma_mem_free(acc_hdp);
16540 ddi_dma_free_handle(dma_hdp);
16541 *dma_hdp = NULL;
16542 }