1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
26 */
27
28 /*
29 * Copyright (c) 2000 to 2010, LSI Corporation.
30 * All rights reserved.
31 *
32 * Redistribution and use in source and binary forms of all code within
33 * this file that is exclusively owned by LSI, with or without
34 * modification, is permitted provided that, in addition to the CDDL 1.0
35 * License requirements, the following conditions are met:
36 *
37 * Neither the name of the author nor the names of its contributors may be
38 * used to endorse or promote products derived from this software without
39 * specific prior written permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
44 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
45 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
46 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
47 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
48 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
49 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
50 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
51 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
52 * DAMAGE.
53 */
54
55 /*
56 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
57 *
58 */
59
60 #if defined(lint) || defined(DEBUG)
61 #define MPTSAS_DEBUG
62 #endif
63
64 /*
65 * standard header files.
66 */
67 #include <sys/note.h>
68 #include <sys/scsi/scsi.h>
69 #include <sys/pci.h>
70 #include <sys/file.h>
71 #include <sys/cpuvar.h>
72 #include <sys/policy.h>
73 #include <sys/model.h>
74 #include <sys/sysevent.h>
75 #include <sys/sysevent/eventdefs.h>
76 #include <sys/sysevent/dr.h>
77 #include <sys/sata/sata_defs.h>
78 #include <sys/scsi/generic/sas.h>
79 #include <sys/scsi/impl/scsi_sas.h>
80
81 #pragma pack(1)
82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
90 #pragma pack()
91
92 /*
93 * private header files.
94 *
95 */
96 #include <sys/scsi/impl/scsi_reset_notify.h>
97 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
98 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
99 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
100
101 #include <sys/raidioctl.h>
102
103 #include <sys/fs/dv_node.h> /* devfs_clean */
104
105 /*
106 * FMA header files
107 */
108 #include <sys/ddifm.h>
109 #include <sys/fm/protocol.h>
110 #include <sys/fm/util.h>
111 #include <sys/fm/io/ddi.h>
112
113 /*
114 * For anyone who would modify the code in mptsas_driver, it must be awared
115 * that from snv_145 where CR6910752(mpt_sas driver performance can be
116 * improved) is integrated, the per_instance mutex m_mutex is not hold
117 * in the key IO code path, including mptsas_scsi_start(), mptsas_intr()
118 * and all of the recursive functions called in them, so don't
119 * make it for granted that all operations are sync/exclude correctly. Before
120 * doing any modification in key code path, and even other code path such as
121 * DR, watchsubr, ioctl, passthrough etc, make sure the elements modified have
122 * no releationship to elements shown in the fastpath
123 * (function mptsas_handle_io_fastpath()) in ISR and its recursive functions.
124 * otherwise, you have to use the new introduced mutex to protect them.
125 * As to how to do correctly, refer to the comments in mptsas_intr().
126 */
127
128 /*
129 * autoconfiguration data and routines.
130 */
131 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
132 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
133 static int mptsas_power(dev_info_t *dip, int component, int level);
134
135 /*
136 * cb_ops function
137 */
138 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
139 cred_t *credp, int *rval);
140 #ifdef __sparc
141 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
142 #else /* __sparc */
143 static int mptsas_quiesce(dev_info_t *devi);
144 #endif /* __sparc */
145
146 /*
147 * Resource initilaization for hardware
148 */
149 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
150 static void mptsas_disable_bus_master(mptsas_t *mpt);
151 static void mptsas_hba_fini(mptsas_t *mpt);
152 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
153 static int mptsas_hba_setup(mptsas_t *mpt);
154 static void mptsas_hba_teardown(mptsas_t *mpt);
155 static int mptsas_config_space_init(mptsas_t *mpt);
156 static void mptsas_config_space_fini(mptsas_t *mpt);
157 static void mptsas_iport_register(mptsas_t *mpt);
158 static int mptsas_smp_setup(mptsas_t *mpt);
159 static void mptsas_smp_teardown(mptsas_t *mpt);
160 static int mptsas_cache_create(mptsas_t *mpt);
161 static void mptsas_cache_destroy(mptsas_t *mpt);
162 static int mptsas_alloc_request_frames(mptsas_t *mpt);
163 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
164 static int mptsas_alloc_free_queue(mptsas_t *mpt);
165 static int mptsas_alloc_post_queue(mptsas_t *mpt);
166 static void mptsas_alloc_reply_args(mptsas_t *mpt);
167 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
168 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
169 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
170
171 /*
172 * SCSA function prototypes
173 */
174 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
175 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
176 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
177 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
178 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
179 int tgtonly);
180 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
181 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
182 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
183 int tgtlen, int flags, int (*callback)(), caddr_t arg);
184 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
185 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
186 struct scsi_pkt *pkt);
187 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
188 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
189 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
190 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
191 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
192 void (*callback)(caddr_t), caddr_t arg);
193 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
194 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
195 static int mptsas_scsi_quiesce(dev_info_t *dip);
196 static int mptsas_scsi_unquiesce(dev_info_t *dip);
197 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
198 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
199
200 /*
201 * SMP functions
202 */
203 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
204
205 /*
206 * internal function prototypes.
207 */
208 static void mptsas_list_add(mptsas_t *mpt);
209 static void mptsas_list_del(mptsas_t *mpt);
210
211 static int mptsas_quiesce_bus(mptsas_t *mpt);
212 static int mptsas_unquiesce_bus(mptsas_t *mpt);
213
214 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
215 static void mptsas_free_handshake_msg(mptsas_t *mpt);
216
217 static void mptsas_ncmds_checkdrain(void *arg);
218
219 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
220 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
221
222 static int mptsas_do_detach(dev_info_t *dev);
223 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
224 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
225 struct scsi_pkt *pkt);
226 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
227
228 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
229 static void mptsas_handle_event(void *args);
230 static int mptsas_handle_event_sync(void *args);
231 static void mptsas_handle_dr(void *args);
232 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
233 dev_info_t *pdip);
234
235 static void mptsas_restart_cmd(void *);
236
237 static void mptsas_flush_hba(mptsas_t *mpt);
238 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
239 uint8_t tasktype);
240 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
241 uchar_t reason, uint_t stat);
242
243 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
244 static void mptsas_process_intr(mptsas_t *mpt,
245 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
246 static int mptsas_handle_io_fastpath(mptsas_t *mpt, uint16_t SMID);
247 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
248 pMpi2ReplyDescriptorsUnion_t reply_desc);
249 static void mptsas_handle_address_reply(mptsas_t *mpt,
250 pMpi2ReplyDescriptorsUnion_t reply_desc);
251 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
252 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
253 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
254
255 static void mptsas_watch(void *arg);
256 static void mptsas_watchsubr(mptsas_t *mpt);
257 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
258
259 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
260 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
261 uint8_t *data, uint32_t request_size, uint32_t reply_size,
262 uint32_t data_size, uint32_t direction, uint8_t *dataout,
263 uint32_t dataout_size, short timeout, int mode);
264 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
265
266 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
267 uint32_t unique_id);
268 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
269 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
270 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
271 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
272 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
273 uint32_t diag_type);
274 static int mptsas_diag_register(mptsas_t *mpt,
275 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
276 static int mptsas_diag_unregister(mptsas_t *mpt,
277 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
278 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
279 uint32_t *return_code);
280 static int mptsas_diag_read_buffer(mptsas_t *mpt,
281 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
282 uint32_t *return_code, int ioctl_mode);
283 static int mptsas_diag_release(mptsas_t *mpt,
284 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
285 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
286 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
287 int ioctl_mode);
288 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
289 int mode);
290
291 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
292 int cmdlen, int tgtlen, int statuslen, int kf);
293 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
294
295 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
296 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
297
298 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
299 int kmflags);
300 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
301
302 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
303 mptsas_cmd_t *cmd);
304 static void mptsas_check_task_mgt(mptsas_t *mpt,
305 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
306 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
307 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
308 int *resid);
309
310 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
311 static void mptsas_free_active_slots(mptsas_t *mpt);
312 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
313 static int mptsas_start_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd);
314
315 static void mptsas_restart_hba(mptsas_t *mpt);
316
317 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
318 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
319 static inline void mptsas_doneq_add0(mptsas_t *mpt, mptsas_cmd_t *cmd);
320 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
321
322 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
323 static void mptsas_doneq_empty(mptsas_t *mpt);
324 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
325
326 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
327 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
328
329 static void mptsas_start_watch_reset_delay();
330 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
331 static void mptsas_watch_reset_delay(void *arg);
332 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
333
334 static int mptsas_outstanding_cmds_n(mptsas_t *mpt);
335 /*
336 * helper functions
337 */
338 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
339
340 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
341 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
342 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
343 int lun);
344 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
345 int lun);
346 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
347 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
348
349 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
350 int *lun);
351 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
352
353 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask,
354 uint8_t phy);
355 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask,
356 uint64_t wwid);
357 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask,
358 uint64_t wwid);
359
360 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
361 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
362
363 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
364 uint16_t *handle, mptsas_target_t **pptgt);
365 static void mptsas_update_phymask(mptsas_t *mpt);
366 static inline void mptsas_remove_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd);
367
368 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
369 uint32_t *status, uint8_t cmd);
370 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
371 mptsas_phymask_t *phymask);
372 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
373 mptsas_phymask_t phymask);
374 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt);
375
376
377 /*
378 * Enumeration / DR functions
379 */
380 static void mptsas_config_all(dev_info_t *pdip);
381 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
382 dev_info_t **lundip);
383 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
384 dev_info_t **lundip);
385
386 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
387 static int mptsas_offline_target(dev_info_t *pdip, char *name);
388
389 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
390 dev_info_t **dip);
391
392 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
393 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
394 dev_info_t **dip, mptsas_target_t *ptgt);
395
396 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
397 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
398
399 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
400 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
401 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
402 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
403 int lun);
404
405 static void mptsas_offline_missed_luns(dev_info_t *pdip,
406 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
407 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
408 mdi_pathinfo_t *rpip, uint_t flags);
409
410 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
411 dev_info_t **smp_dip);
412 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
413 uint_t flags);
414
415 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
416 int mode, int *rval);
417 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
418 int mode, int *rval);
419 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
420 int mode, int *rval);
421 static void mptsas_record_event(void *args);
422 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
423 int mode);
424
425 static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
426 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
427 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
428 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
429 mptsas_phymask_t key2);
430 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
431 mptsas_phymask_t key2);
432 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
433
434 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
435 uint32_t, mptsas_phymask_t, uint8_t, mptsas_t *);
436 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
437 mptsas_smp_t *data);
438 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
439 mptsas_phymask_t phymask);
440 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t);
441 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
442 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
443 dev_info_t **smp_dip);
444
445 /*
446 * Power management functions
447 */
448 static int mptsas_get_pci_cap(mptsas_t *mpt);
449 static int mptsas_init_pm(mptsas_t *mpt);
450
451 /*
452 * MPT MSI tunable:
453 *
454 * By default MSI is enabled on all supported platforms.
455 */
456 boolean_t mptsas_enable_msi = B_TRUE;
457 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
458
459 static int mptsas_register_intrs(mptsas_t *);
460 static void mptsas_unregister_intrs(mptsas_t *);
461 static int mptsas_add_intrs(mptsas_t *, int);
462 static void mptsas_rem_intrs(mptsas_t *);
463
464 /*
465 * FMA Prototypes
466 */
467 static void mptsas_fm_init(mptsas_t *mpt);
468 static void mptsas_fm_fini(mptsas_t *mpt);
469 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
470
471 extern pri_t minclsyspri, maxclsyspri;
472
473 /*
474 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
475 * under this device that the paths to a physical device are created when
476 * MPxIO is used.
477 */
478 extern dev_info_t *scsi_vhci_dip;
479
480 /*
481 * Tunable timeout value for Inquiry VPD page 0x83
482 * By default the value is 30 seconds.
483 */
484 int mptsas_inq83_retry_timeout = 30;
485
486 /*
487 * This is used to allocate memory for message frame storage, not for
488 * data I/O DMA. All message frames must be stored in the first 4G of
489 * physical memory.
490 */
491 ddi_dma_attr_t mptsas_dma_attrs = {
492 DMA_ATTR_V0, /* attribute layout version */
493 0x0ull, /* address low - should be 0 (longlong) */
494 0xffffffffull, /* address high - 32-bit max range */
495 0x00ffffffull, /* count max - max DMA object size */
496 4, /* allocation alignment requirements */
497 0x78, /* burstsizes - binary encoded values */
498 1, /* minxfer - gran. of DMA engine */
499 0x00ffffffull, /* maxxfer - gran. of DMA engine */
500 0xffffffffull, /* max segment size (DMA boundary) */
501 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
502 512, /* granularity - device transfer size */
503 0 /* flags, set to 0 */
504 };
505
506 /*
507 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
508 * physical addresses are supported.)
509 */
510 ddi_dma_attr_t mptsas_dma_attrs64 = {
511 DMA_ATTR_V0, /* attribute layout version */
512 0x0ull, /* address low - should be 0 (longlong) */
513 0xffffffffffffffffull, /* address high - 64-bit max */
514 0x00ffffffull, /* count max - max DMA object size */
515 4, /* allocation alignment requirements */
516 0x78, /* burstsizes - binary encoded values */
517 1, /* minxfer - gran. of DMA engine */
518 0x00ffffffull, /* maxxfer - gran. of DMA engine */
519 0xffffffffull, /* max segment size (DMA boundary) */
520 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
521 512, /* granularity - device transfer size */
522 DDI_DMA_RELAXED_ORDERING /* flags, enable relaxed ordering */
523 };
524
/*
 * Device register access attributes: the controller's structures are
 * little-endian and accesses use strict ordering.
 */
ddi_device_acc_attr_t mptsas_dev_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
531
/*
 * Character/block driver entry points (cb_ops(9S)).  Only open, close
 * and ioctl are implemented; open/close are delegated to the SCSA
 * framework (scsi_hba_open/scsi_hba_close).
 */
static struct cb_ops mptsas_cb_ops = {
	scsi_hba_open,		/* open */
	scsi_hba_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mptsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* streamtab */
	D_MP,			/* cb_flag */
	CB_REV,			/* rev */
	nodev,			/* aread */
	nodev			/* awrite */
};
552
/*
 * Device operations vector (dev_ops(9S)).  Autoconfiguration entry
 * points for the driver.  SPARC builds supply a reset(9E) entry while
 * other platforms supply quiesce(9E) instead.
 */
static struct dev_ops mptsas_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mptsas_attach,		/* attach */
	mptsas_detach,		/* detach */
#ifdef __sparc
	mptsas_reset,
#else
	nodev,			/* reset */
#endif /* __sparc */
	&mptsas_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	mptsas_power,		/* power management */
#ifdef __sparc
	ddi_quiesce_not_needed
#else
	mptsas_quiesce		/* quiesce */
#endif /* __sparc */
};
575
576
577 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
578
/*
 * Loadable-module linkage for a device driver (modldrv(9S)).
 */
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. This one is a driver */
	MPTSAS_MOD_STRING, /* Name of the module. */
	&mptsas_ops,	/* driver ops */
};
584
/*
 * Module linkage vector passed to mod_install(9F)/mod_remove(9F).
 */
static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
588 #define TARGET_PROP "target"
589 #define LUN_PROP "lun"
590 #define LUN64_PROP "lun64"
591 #define SAS_PROP "sas-mpt"
592 #define MDI_GUID "wwn"
593 #define NDI_GUID "guid"
594 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
595
596 /*
597 * Local static data
598 */
599 #if defined(MPTSAS_DEBUG)
600 uint32_t mptsas_debug_flags = 0;
601 #endif /* defined(MPTSAS_DEBUG) */
602 uint32_t mptsas_debug_resets = 0;
603
604 static kmutex_t mptsas_global_mutex;
605 static void *mptsas_state; /* soft state ptr */
606 static krwlock_t mptsas_global_rwlock;
607
608 static kmutex_t mptsas_log_mutex;
609 static char mptsas_log_buf[256];
610 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
611
612 static mptsas_t *mptsas_head, *mptsas_tail;
613 static clock_t mptsas_scsi_watchdog_tick;
614 static clock_t mptsas_tick;
615 static timeout_id_t mptsas_reset_watch;
616 static timeout_id_t mptsas_timeout_id;
617 static int mptsas_timeouts_enabled = 0;
618 /*
619 * warlock directives
620 */
621 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
622 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
623 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
624 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
625 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
626 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
627
628 /*
629 * SM - HBA statics
630 */
631 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
632
633 #ifdef MPTSAS_DEBUG
634 void debug_enter(char *);
635 #endif
636
637 /*
638 * Notes:
639 * - scsi_hba_init(9F) initializes SCSI HBA modules
640 * - must call scsi_hba_fini(9F) if modload() fails
641 */
642 int
643 _init(void)
644 {
645 int status;
646 /* CONSTCOND */
647 ASSERT(NO_COMPETING_THREADS);
648
649 NDBG0(("_init"));
650
651 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
652 MPTSAS_INITIAL_SOFT_SPACE);
653 if (status != 0) {
654 return (status);
655 }
656
657 if ((status = scsi_hba_init(&modlinkage)) != 0) {
658 ddi_soft_state_fini(&mptsas_state);
659 return (status);
660 }
661
662 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
663 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
664 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
665
666 if ((status = mod_install(&modlinkage)) != 0) {
667 mutex_destroy(&mptsas_log_mutex);
668 rw_destroy(&mptsas_global_rwlock);
669 mutex_destroy(&mptsas_global_mutex);
670 ddi_soft_state_fini(&mptsas_state);
671 scsi_hba_fini(&modlinkage);
672 }
673
674 return (status);
675 }
676
677 /*
678 * Notes:
679 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
680 */
681 int
682 _fini(void)
683 {
684 int status;
685 /* CONSTCOND */
686 ASSERT(NO_COMPETING_THREADS);
687
688 NDBG0(("_fini"));
689
690 if ((status = mod_remove(&modlinkage)) == 0) {
691 ddi_soft_state_fini(&mptsas_state);
692 scsi_hba_fini(&modlinkage);
693 mutex_destroy(&mptsas_global_mutex);
694 rw_destroy(&mptsas_global_rwlock);
695 mutex_destroy(&mptsas_log_mutex);
696 }
697 return (status);
698 }
699
700 /*
701 * The loadable-module _info(9E) entry point
702 */
703 int
704 _info(struct modinfo *modinfop)
705 {
706 /* CONSTCOND */
707 ASSERT(NO_COMPETING_THREADS);
708 NDBG0(("mptsas _info"));
709
710 return (mod_info(&modlinkage, modinfop));
711 }
712
713
714 static int
715 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
716 {
717 dev_info_t *pdip;
718 mptsas_t *mpt;
719 scsi_hba_tran_t *hba_tran;
720 char *iport = NULL;
721 char phymask[MPTSAS_MAX_PHYS];
722 mptsas_phymask_t phy_mask = 0;
723 int dynamic_port = 0;
724 uint32_t page_address;
725 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
726 int rval = DDI_FAILURE;
727 int i = 0;
728 uint8_t numphys = 0;
729 uint8_t phy_id;
730 uint8_t phy_port = 0;
731 uint16_t attached_devhdl = 0;
732 uint32_t dev_info;
733 uint64_t attached_sas_wwn;
734 uint16_t dev_hdl;
735 uint16_t pdev_hdl;
736 uint16_t bay_num, enclosure;
737 char attached_wwnstr[MPTSAS_WWN_STRLEN];
738
739 /* CONSTCOND */
740 ASSERT(NO_COMPETING_THREADS);
741
742 switch (cmd) {
743 case DDI_ATTACH:
744 break;
745
746 case DDI_RESUME:
747 /*
748 * If this a scsi-iport node, nothing to do here.
749 */
750 return (DDI_SUCCESS);
751
752 default:
753 return (DDI_FAILURE);
754 }
755
756 pdip = ddi_get_parent(dip);
757
758 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
759 NULL) {
760 cmn_err(CE_WARN, "Failed attach iport because fail to "
761 "get tran vector for the HBA node");
762 return (DDI_FAILURE);
763 }
764
765 mpt = TRAN2MPT(hba_tran);
766 ASSERT(mpt != NULL);
767 if (mpt == NULL)
768 return (DDI_FAILURE);
769
770 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
771 NULL) {
772 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
773 "get tran vector for the iport node");
774 return (DDI_FAILURE);
775 }
776
777 /*
778 * Overwrite parent's tran_hba_private to iport's tran vector
779 */
780 hba_tran->tran_hba_private = mpt;
781
782 ddi_report_dev(dip);
783
784 /*
785 * Get SAS address for initiator port according dev_handle
786 */
787 iport = ddi_get_name_addr(dip);
788 if (iport && strncmp(iport, "v0", 2) == 0) {
789 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
790 MPTSAS_VIRTUAL_PORT, 1) !=
791 DDI_PROP_SUCCESS) {
792 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
793 MPTSAS_VIRTUAL_PORT);
794 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
795 "prop update failed");
796 return (DDI_FAILURE);
797 }
798 return (DDI_SUCCESS);
799 }
800
801 mutex_enter(&mpt->m_mutex);
802 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
803 bzero(phymask, sizeof (phymask));
804 (void) sprintf(phymask,
805 "%x", mpt->m_phy_info[i].phy_mask);
806 if (strcmp(phymask, iport) == 0) {
807 break;
808 }
809 }
810
811 if (i == MPTSAS_MAX_PHYS) {
812 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
813 "seems not exist", iport);
814 mutex_exit(&mpt->m_mutex);
815 return (DDI_FAILURE);
816 }
817
818 phy_mask = mpt->m_phy_info[i].phy_mask;
819
820 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
821 dynamic_port = 1;
822 else
823 dynamic_port = 0;
824
825 /*
826 * Update PHY info for smhba
827 */
828 if (mptsas_smhba_phy_init(mpt)) {
829 mutex_exit(&mpt->m_mutex);
830 mptsas_log(mpt, CE_WARN, "mptsas phy update "
831 "failed");
832 return (DDI_FAILURE);
833 }
834
835 mutex_exit(&mpt->m_mutex);
836
837 numphys = 0;
838 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
839 if ((phy_mask >> i) & 0x01) {
840 numphys++;
841 }
842 }
843
844 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
845 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
846 mpt->un.m_base_wwid);
847
848 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
849 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
850 DDI_PROP_SUCCESS) {
851 (void) ddi_prop_remove(DDI_DEV_T_NONE,
852 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
853 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
854 "prop update failed");
855 return (DDI_FAILURE);
856 }
857 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
858 MPTSAS_NUM_PHYS, numphys) !=
859 DDI_PROP_SUCCESS) {
860 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
861 return (DDI_FAILURE);
862 }
863
864 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
865 "phymask", phy_mask) !=
866 DDI_PROP_SUCCESS) {
867 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
868 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
869 "prop update failed");
870 return (DDI_FAILURE);
871 }
872
873 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
874 "dynamic-port", dynamic_port) !=
875 DDI_PROP_SUCCESS) {
876 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
877 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
878 "prop update failed");
879 return (DDI_FAILURE);
880 }
881 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
882 MPTSAS_VIRTUAL_PORT, 0) !=
883 DDI_PROP_SUCCESS) {
884 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
885 MPTSAS_VIRTUAL_PORT);
886 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
887 "prop update failed");
888 return (DDI_FAILURE);
889 }
890 mptsas_smhba_set_phy_props(mpt,
891 iport, dip, numphys, &attached_devhdl);
892
893 mutex_enter(&mpt->m_mutex);
894 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
895 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
896 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
897 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
898 &pdev_hdl, &bay_num, &enclosure);
899 if (rval != DDI_SUCCESS) {
900 mptsas_log(mpt, CE_WARN,
901 "Failed to get device page0 for handle:%d",
902 attached_devhdl);
903 mutex_exit(&mpt->m_mutex);
904 return (DDI_FAILURE);
905 }
906
907 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
908 bzero(phymask, sizeof (phymask));
909 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
910 if (strcmp(phymask, iport) == 0) {
911 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
912 "%x",
913 mpt->m_phy_info[i].phy_mask);
914 }
915 }
916 mutex_exit(&mpt->m_mutex);
917
918 bzero(attached_wwnstr, sizeof (attached_wwnstr));
919 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
920 attached_sas_wwn);
921 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
922 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
923 DDI_PROP_SUCCESS) {
924 (void) ddi_prop_remove(DDI_DEV_T_NONE,
925 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
926 return (DDI_FAILURE);
927 }
928
929 /* Create kstats for each phy on this iport */
930
931 mptsas_create_phy_stats(mpt, iport, dip);
932
933 /*
934 * register sas hba iport with mdi (MPxIO/vhci)
935 */
936 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
937 dip, 0) == MDI_SUCCESS) {
938 mpt->m_mpxio_enable = TRUE;
939 }
940 return (DDI_SUCCESS);
941 }
942
943 /*
944 * Notes:
945 * Set up all device state and allocate data structures,
946 * mutexes, condition variables, etc. for device operation.
947 * Add interrupts needed.
948 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
949 */
950 static int
951 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
952 {
953 mptsas_t *mpt = NULL;
954 int instance, i, j;
955 int doneq_thread_num;
956 char intr_added = 0;
957 char map_setup = 0;
958 char config_setup = 0;
959 char hba_attach_setup = 0;
960 char smp_attach_setup = 0;
961 char mutex_init_done = 0;
962 char event_taskq_create = 0;
963 char dr_taskq_create = 0;
964 char doneq_thread_create = 0;
965 scsi_hba_tran_t *hba_tran;
966 uint_t mem_bar = MEM_SPACE;
967 int rval = DDI_FAILURE;
968
969 /* CONSTCOND */
970 ASSERT(NO_COMPETING_THREADS);
971
972 if (scsi_hba_iport_unit_address(dip)) {
973 return (mptsas_iport_attach(dip, cmd));
974 }
975
976 switch (cmd) {
977 case DDI_ATTACH:
978 break;
979
980 case DDI_RESUME:
981 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
982 return (DDI_FAILURE);
983
984 mpt = TRAN2MPT(hba_tran);
985
986 if (!mpt) {
987 return (DDI_FAILURE);
988 }
989
990 /*
991 * Reset hardware and softc to "no outstanding commands"
992 * Note that a check condition can result on first command
993 * to a target.
994 */
995 mutex_enter(&mpt->m_mutex);
996
997 /*
998 * raise power.
999 */
1000 if (mpt->m_options & MPTSAS_OPT_PM) {
1001 mutex_exit(&mpt->m_mutex);
1002 (void) pm_busy_component(dip, 0);
1003 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
1004 if (rval == DDI_SUCCESS) {
1005 mutex_enter(&mpt->m_mutex);
1006 } else {
1007 /*
1008 * The pm_raise_power() call above failed,
1009 * and that can only occur if we were unable
1010 * to reset the hardware. This is probably
1011 * due to unhealty hardware, and because
1012 * important filesystems(such as the root
1013 * filesystem) could be on the attached disks,
1014 * it would not be a good idea to continue,
1015 * as we won't be entirely certain we are
1016 * writing correct data. So we panic() here
1017 * to not only prevent possible data corruption,
1018 * but to give developers or end users a hope
1019 * of identifying and correcting any problems.
1020 */
1021 fm_panic("mptsas could not reset hardware "
1022 "during resume");
1023 }
1024 }
1025
1026 mpt->m_suspended = 0;
1027
1028 /*
1029 * Reinitialize ioc
1030 */
1031 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1032 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1033 mutex_exit(&mpt->m_mutex);
1034 if (mpt->m_options & MPTSAS_OPT_PM) {
1035 (void) pm_idle_component(dip, 0);
1036 }
1037 fm_panic("mptsas init chip fail during resume");
1038 }
1039 /*
1040 * mptsas_update_driver_data needs interrupts so enable them
1041 * first.
1042 */
1043 MPTSAS_ENABLE_INTR(mpt);
1044 mptsas_update_driver_data(mpt);
1045
1046 /* start requests, if possible */
1047 mptsas_restart_hba(mpt);
1048
1049 mutex_exit(&mpt->m_mutex);
1050
1051 /*
1052 * Restart watch thread
1053 */
1054 mutex_enter(&mptsas_global_mutex);
1055 if (mptsas_timeout_id == 0) {
1056 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1057 mptsas_tick);
1058 mptsas_timeouts_enabled = 1;
1059 }
1060 mutex_exit(&mptsas_global_mutex);
1061
1062 /* report idle status to pm framework */
1063 if (mpt->m_options & MPTSAS_OPT_PM) {
1064 (void) pm_idle_component(dip, 0);
1065 }
1066
1067 return (DDI_SUCCESS);
1068
1069 default:
1070 return (DDI_FAILURE);
1071
1072 }
1073
1074 instance = ddi_get_instance(dip);
1075
1076 /*
1077 * Allocate softc information.
1078 */
1079 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1080 mptsas_log(NULL, CE_WARN,
1081 "mptsas%d: cannot allocate soft state", instance);
1082 goto fail;
1083 }
1084
1085 mpt = ddi_get_soft_state(mptsas_state, instance);
1086
1087 if (mpt == NULL) {
1088 mptsas_log(NULL, CE_WARN,
1089 "mptsas%d: cannot get soft state", instance);
1090 goto fail;
1091 }
1092
1093 /* Mark us as a primary ioctl node for an instance. */
1094 (void) ddi_prop_update_int(DDI_DEV_T_NONE, dip, "primary-ioctl-node",
1095 instance);
1096
1097 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1098 scsi_size_clean(dip);
1099
1100 mpt->m_dip = dip;
1101 mpt->m_instance = instance;
1102
1103 /* Make a per-instance copy of the structures */
1104 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1105 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1106 mpt->m_reg_acc_attr = mptsas_dev_attr;
1107 mpt->m_dev_acc_attr = mptsas_dev_attr;
1108
1109 /*
1110 * Initialize FMA
1111 */
1112 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1113 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1114 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1115 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1116
1117 mptsas_fm_init(mpt);
1118
1119 if (mptsas_alloc_handshake_msg(mpt,
1120 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1121 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1122 goto fail;
1123 }
1124
1125 /*
1126 * Setup configuration space
1127 */
1128 if (mptsas_config_space_init(mpt) == FALSE) {
1129 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1130 goto fail;
1131 }
1132 config_setup++;
1133
1134 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1135 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1136 mptsas_log(mpt, CE_WARN, "map setup failed");
1137 goto fail;
1138 }
1139 map_setup++;
1140
1141 /*
1142 * A taskq is created for dealing with the event handler
1143 */
1144 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1145 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1146 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1147 goto fail;
1148 }
1149 event_taskq_create++;
1150
1151 /*
1152 * A taskq is created for dealing with dr events
1153 */
1154 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1155 "mptsas_dr_taskq",
1156 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1157 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1158 "failed");
1159 goto fail;
1160 }
1161 dr_taskq_create++;
1162
1163 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1164 0, "mptsas_doneq_thread_threshold_prop", 10);
1165 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1166 0, "mptsas_doneq_length_threshold_prop", 8);
1167 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1168 0, "mptsas_doneq_thread_n_prop", 8);
1169
1170 if (mpt->m_doneq_thread_n) {
1171 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1172 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1173
1174 mutex_enter(&mpt->m_doneq_mutex);
1175 mpt->m_doneq_thread_id =
1176 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1177 * mpt->m_doneq_thread_n, KM_SLEEP);
1178
1179 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1180 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1181 CV_DRIVER, NULL);
1182 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1183 MUTEX_DRIVER, NULL);
1184 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1185 mpt->m_doneq_thread_id[j].flag |=
1186 MPTSAS_DONEQ_THREAD_ACTIVE;
1187 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1188 mpt->m_doneq_thread_id[j].arg.t = j;
1189 mpt->m_doneq_thread_id[j].threadp =
1190 thread_create(NULL, 0, mptsas_doneq_thread,
1191 &mpt->m_doneq_thread_id[j].arg,
1192 0, &p0, TS_RUN, minclsyspri);
1193 mpt->m_doneq_thread_id[j].donetail =
1194 &mpt->m_doneq_thread_id[j].doneq;
1195 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1196 }
1197 mutex_exit(&mpt->m_doneq_mutex);
1198 doneq_thread_create++;
1199 }
1200
1201 /* Initialize mutex used in interrupt handler */
1202 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1203 DDI_INTR_PRI(mpt->m_intr_pri));
1204 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1205 mutex_init(&mpt->m_intr_mutex, NULL, MUTEX_DRIVER,
1206 DDI_INTR_PRI(mpt->m_intr_pri));
1207 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1208 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1209 NULL, MUTEX_DRIVER,
1210 DDI_INTR_PRI(mpt->m_intr_pri));
1211 }
1212
1213 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1214 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1215 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1216 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1217 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1218 mutex_init_done++;
1219
1220 /*
1221 * Disable hardware interrupt since we're not ready to
1222 * handle it yet.
1223 */
1224 MPTSAS_DISABLE_INTR(mpt);
1225 if (mptsas_register_intrs(mpt) == FALSE)
1226 goto fail;
1227 intr_added++;
1228
1229 mutex_enter(&mpt->m_mutex);
1230 /*
1231 * Initialize power management component
1232 */
1233 if (mpt->m_options & MPTSAS_OPT_PM) {
1234 if (mptsas_init_pm(mpt)) {
1235 mutex_exit(&mpt->m_mutex);
1236 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1237 "failed");
1238 goto fail;
1239 }
1240 }
1241
1242 /*
1243 * Initialize chip using Message Unit Reset, if allowed
1244 */
1245 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1246 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1247 mutex_exit(&mpt->m_mutex);
1248 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1249 goto fail;
1250 }
1251
1252 /*
1253 * Fill in the phy_info structure and get the base WWID
1254 */
1255 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1256 mptsas_log(mpt, CE_WARN,
1257 "mptsas_get_manufacture_page5 failed!");
1258 goto fail;
1259 }
1260
1261 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1262 mptsas_log(mpt, CE_WARN,
1263 "mptsas_get_sas_io_unit_page_hndshk failed!");
1264 goto fail;
1265 }
1266
1267 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1268 mptsas_log(mpt, CE_WARN,
1269 "mptsas_get_manufacture_page0 failed!");
1270 goto fail;
1271 }
1272
1273 mutex_exit(&mpt->m_mutex);
1274
1275 /*
1276 * Register the iport for multiple port HBA
1277 */
1278 mptsas_iport_register(mpt);
1279
1280 /*
1281 * initialize SCSI HBA transport structure
1282 */
1283 if (mptsas_hba_setup(mpt) == FALSE)
1284 goto fail;
1285 hba_attach_setup++;
1286
1287 if (mptsas_smp_setup(mpt) == FALSE)
1288 goto fail;
1289 smp_attach_setup++;
1290
1291 if (mptsas_cache_create(mpt) == FALSE)
1292 goto fail;
1293
1294 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1295 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1296 if (mpt->m_scsi_reset_delay == 0) {
1297 mptsas_log(mpt, CE_NOTE,
1298 "scsi_reset_delay of 0 is not recommended,"
1299 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1300 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1301 }
1302
1303 /*
1304 * Initialize the wait and done FIFO queue
1305 */
1306 mpt->m_donetail = &mpt->m_doneq;
1307 mpt->m_waitqtail = &mpt->m_waitq;
1308
1309 /*
1310 * ioc cmd queue initialize
1311 */
1312 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1313 mpt->m_dev_handle = 0xFFFF;
1314
1315 MPTSAS_ENABLE_INTR(mpt);
1316
1317 /*
1318 * enable event notification
1319 */
1320 mutex_enter(&mpt->m_mutex);
1321 if (mptsas_ioc_enable_event_notification(mpt)) {
1322 mutex_exit(&mpt->m_mutex);
1323 goto fail;
1324 }
1325 mutex_exit(&mpt->m_mutex);
1326
1327 /*
1328 * Initialize PHY info for smhba
1329 */
1330 if (mptsas_smhba_setup(mpt)) {
1331 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1332 "failed");
1333 goto fail;
1334 }
1335
1336 /* Check all dma handles allocated in attach */
1337 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1338 != DDI_SUCCESS) ||
1339 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1340 != DDI_SUCCESS) ||
1341 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1342 != DDI_SUCCESS) ||
1343 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1344 != DDI_SUCCESS) ||
1345 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1346 != DDI_SUCCESS)) {
1347 goto fail;
1348 }
1349
1350 /* Check all acc handles allocated in attach */
1351 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1352 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1353 != DDI_SUCCESS) ||
1354 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1355 != DDI_SUCCESS) ||
1356 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1357 != DDI_SUCCESS) ||
1358 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1359 != DDI_SUCCESS) ||
1360 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1361 != DDI_SUCCESS) ||
1362 (mptsas_check_acc_handle(mpt->m_config_handle)
1363 != DDI_SUCCESS)) {
1364 goto fail;
1365 }
1366
1367 /*
1368 * After this point, we are not going to fail the attach.
1369 */
1370 /*
1371 * used for mptsas_watch
1372 */
1373 mptsas_list_add(mpt);
1374
1375 mutex_enter(&mptsas_global_mutex);
1376 if (mptsas_timeouts_enabled == 0) {
1377 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1378 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1379
1380 mptsas_tick = mptsas_scsi_watchdog_tick *
1381 drv_usectohz((clock_t)1000000);
1382
1383 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1384 mptsas_timeouts_enabled = 1;
1385 }
1386 mutex_exit(&mptsas_global_mutex);
1387
1388 /* Print message of HBA present */
1389 ddi_report_dev(dip);
1390
1391 /* report idle status to pm framework */
1392 if (mpt->m_options & MPTSAS_OPT_PM) {
1393 (void) pm_idle_component(dip, 0);
1394 }
1395
1396 return (DDI_SUCCESS);
1397
1398 fail:
1399 mptsas_log(mpt, CE_WARN, "attach failed");
1400 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1401 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1402 if (mpt) {
1403 mutex_enter(&mptsas_global_mutex);
1404
1405 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1406 timeout_id_t tid = mptsas_timeout_id;
1407 mptsas_timeouts_enabled = 0;
1408 mptsas_timeout_id = 0;
1409 mutex_exit(&mptsas_global_mutex);
1410 (void) untimeout(tid);
1411 mutex_enter(&mptsas_global_mutex);
1412 }
1413 mutex_exit(&mptsas_global_mutex);
1414 /* deallocate in reverse order */
1415 mptsas_cache_destroy(mpt);
1416
1417 if (smp_attach_setup) {
1418 mptsas_smp_teardown(mpt);
1419 }
1420 if (hba_attach_setup) {
1421 mptsas_hba_teardown(mpt);
1422 }
1423
1424 if (mpt->m_active) {
1425 mptsas_hash_uninit(&mpt->m_active->m_smptbl,
1426 sizeof (mptsas_smp_t));
1427 mptsas_hash_uninit(&mpt->m_active->m_tgttbl,
1428 sizeof (mptsas_target_t));
1429 mptsas_free_active_slots(mpt);
1430 }
1431 if (intr_added) {
1432 mptsas_unregister_intrs(mpt);
1433 }
1434
1435 if (doneq_thread_create) {
1436 mutex_enter(&mpt->m_doneq_mutex);
1437 doneq_thread_num = mpt->m_doneq_thread_n;
1438 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1439 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1440 mpt->m_doneq_thread_id[j].flag &=
1441 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1442 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1443 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1444 }
1445 while (mpt->m_doneq_thread_n) {
1446 cv_wait(&mpt->m_doneq_thread_cv,
1447 &mpt->m_doneq_mutex);
1448 }
1449 for (j = 0; j < doneq_thread_num; j++) {
1450 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1451 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1452 }
1453 kmem_free(mpt->m_doneq_thread_id,
1454 sizeof (mptsas_doneq_thread_list_t)
1455 * doneq_thread_num);
1456 mutex_exit(&mpt->m_doneq_mutex);
1457 cv_destroy(&mpt->m_doneq_thread_cv);
1458 mutex_destroy(&mpt->m_doneq_mutex);
1459 }
1460 if (event_taskq_create) {
1461 ddi_taskq_destroy(mpt->m_event_taskq);
1462 }
1463 if (dr_taskq_create) {
1464 ddi_taskq_destroy(mpt->m_dr_taskq);
1465 }
1466 if (mutex_init_done) {
1467 mutex_destroy(&mpt->m_intr_mutex);
1468 mutex_destroy(&mpt->m_passthru_mutex);
1469 mutex_destroy(&mpt->m_mutex);
1470 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1471 mutex_destroy(
1472 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1473 }
1474 cv_destroy(&mpt->m_cv);
1475 cv_destroy(&mpt->m_passthru_cv);
1476 cv_destroy(&mpt->m_fw_cv);
1477 cv_destroy(&mpt->m_config_cv);
1478 cv_destroy(&mpt->m_fw_diag_cv);
1479 }
1480
1481 if (map_setup) {
1482 mptsas_cfg_fini(mpt);
1483 }
1484 if (config_setup) {
1485 mptsas_config_space_fini(mpt);
1486 }
1487 mptsas_free_handshake_msg(mpt);
1488 mptsas_hba_fini(mpt);
1489
1490 mptsas_fm_fini(mpt);
1491 ddi_soft_state_free(mptsas_state, instance);
1492 ddi_prop_remove_all(dip);
1493 }
1494 return (DDI_FAILURE);
1495 }
1496
/*
 * DDI_SUSPEND handler for a hardware instance.
 *
 * Marks the instance suspended, cancels its private timeouts, stops the
 * driver-global watch timeouts once every instance is suspended, and (if
 * the device is at full power) masks interrupts, syncs IR metadata via a
 * RAID action system shutdown, and drains the event/dr taskqs.
 * Always returns DDI_SUCCESS.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* iport nodes have no per-instance hardware state to quiesce */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* m_suspended counts nested suspends; only the first does work */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		/* drop m_mutex across untimeout() so the callback can run */
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	/* g == NULL means no instance is still running */
	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			/* release the global lock across untimeout() */
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1603
1604 #ifdef __sparc
1605 /*ARGSUSED*/
1606 static int
1607 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1608 {
1609 mptsas_t *mpt;
1610 scsi_hba_tran_t *tran;
1611
1612 /*
1613 * If this call is for iport, just return.
1614 */
1615 if (scsi_hba_iport_unit_address(devi))
1616 return (DDI_SUCCESS);
1617
1618 if ((tran = ddi_get_driver_private(devi)) == NULL)
1619 return (DDI_SUCCESS);
1620
1621 if ((mpt = TRAN2MPT(tran)) == NULL)
1622 return (DDI_SUCCESS);
1623
1624 /*
1625 * Send RAID action system shutdown to sync IR. Disable HBA
1626 * interrupts in hardware first.
1627 */
1628 MPTSAS_DISABLE_INTR(mpt);
1629 mptsas_raid_action_system_shutdown(mpt);
1630
1631 return (DDI_SUCCESS);
1632 }
1633 #else /* __sparc */
1634 /*
1635 * quiesce(9E) entry point.
1636 *
1637 * This function is called when the system is single-threaded at high
1638 * PIL with preemption disabled. Therefore, this function must not be
1639 * blocked.
1640 *
1641 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1642 * DDI_FAILURE indicates an error condition and should almost never happen.
1643 */
1644 static int
1645 mptsas_quiesce(dev_info_t *devi)
1646 {
1647 mptsas_t *mpt;
1648 scsi_hba_tran_t *tran;
1649
1650 /*
1651 * If this call is for iport, just return.
1652 */
1653 if (scsi_hba_iport_unit_address(devi))
1654 return (DDI_SUCCESS);
1655
1656 if ((tran = ddi_get_driver_private(devi)) == NULL)
1657 return (DDI_SUCCESS);
1658
1659 if ((mpt = TRAN2MPT(tran)) == NULL)
1660 return (DDI_SUCCESS);
1661
1662 /* Disable HBA interrupts in hardware */
1663 MPTSAS_DISABLE_INTR(mpt);
1664 /* Send RAID action system shutdonw to sync IR */
1665 mptsas_raid_action_system_shutdown(mpt);
1666
1667 return (DDI_SUCCESS);
1668 }
1669 #endif /* __sparc */
1670
1671 /*
1672 * detach(9E). Remove all device allocations and system resources;
1673 * disable device interrupts.
1674 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1675 */
1676 static int
1677 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1678 {
1679 /* CONSTCOND */
1680 ASSERT(NO_COMPETING_THREADS);
1681 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1682
1683 switch (cmd) {
1684 case DDI_DETACH:
1685 return (mptsas_do_detach(devi));
1686
1687 case DDI_SUSPEND:
1688 return (mptsas_suspend(devi));
1689
1690 default:
1691 return (DDI_FAILURE);
1692 }
1693 /* NOTREACHED */
1694 }
1695
/*
 * DDI_DETACH worker.
 *
 * For iport nodes: free all MDI path-info children (failing if any is
 * busy), unregister from MPxIO, and remove properties.  For the HBA node:
 * raise power to D0, quiesce the IOC (RAID shutdown + message unit
 * reset), then free every resource acquired in mptsas_attach() in
 * reverse order.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* free each client path; any busy path aborts detach */
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR.  After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/* Signal all doneq worker threads to exit, then reap them. */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		/* each exiting thread decrements m_doneq_thread_n */
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		/* drop m_mutex across untimeout() so the callback can run */
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	/*
	 * Delete m_active (target and SMP hash tables plus slots).
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_hash_uninit(&mpt->m_active->m_tgttbl, sizeof (mptsas_target_t));
	mptsas_hash_uninit(&mpt->m_active->m_smptbl, sizeof (mptsas_smp_t));
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_intr_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
1911
1912 static void
1913 mptsas_list_add(mptsas_t *mpt)
1914 {
1915 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1916
1917 if (mptsas_head == NULL) {
1918 mptsas_head = mpt;
1919 } else {
1920 mptsas_tail->m_next = mpt;
1921 }
1922 mptsas_tail = mpt;
1923 rw_exit(&mptsas_global_rwlock);
1924 }
1925
1926 static void
1927 mptsas_list_del(mptsas_t *mpt)
1928 {
1929 mptsas_t *m;
1930 /*
1931 * Remove device instance from the global linked list
1932 */
1933 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1934 if (mptsas_head == mpt) {
1935 m = mptsas_head = mpt->m_next;
1936 } else {
1937 for (m = mptsas_head; m != NULL; m = m->m_next) {
1938 if (m->m_next == mpt) {
1939 m->m_next = mpt->m_next;
1940 break;
1941 }
1942 }
1943 if (m == NULL) {
1944 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
1945 }
1946 }
1947
1948 if (mptsas_tail == mpt) {
1949 mptsas_tail = m;
1950 }
1951 rw_exit(&mptsas_global_rwlock);
1952 }
1953
1954 static int
1955 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
1956 {
1957 ddi_dma_attr_t task_dma_attrs;
1958
1959 task_dma_attrs = mpt->m_msg_dma_attr;
1960 task_dma_attrs.dma_attr_sgllen = 1;
1961 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
1962
1963 /* allocate Task Management ddi_dma resources */
1964 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
1965 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
1966 alloc_size, NULL) == FALSE) {
1967 return (DDI_FAILURE);
1968 }
1969 mpt->m_hshk_dma_size = alloc_size;
1970
1971 return (DDI_SUCCESS);
1972 }
1973
/*
 * Free the Task Management handshake DMA resources allocated by
 * mptsas_alloc_handshake_msg() and record that no buffer remains.
 */
static void
mptsas_free_handshake_msg(mptsas_t *mpt)
{
	mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
	mpt->m_hshk_dma_size = 0;
}
1980
/*
 * Allocate and populate the SCSA transport vector for this HBA and
 * register it with the framework via scsi_hba_attach_setup().
 * Returns TRUE on success; on failure frees the tran, clears
 * mpt->m_tran, and returns FALSE.
 */
static int
mptsas_hba_setup(mptsas_t *mpt)
{
	scsi_hba_tran_t		*hba_tran;
	int			tran_flags;

	/* Allocate a transport structure */
	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
	    SCSI_HBA_CANSLEEP);
	ASSERT(mpt->m_tran != NULL);

	hba_tran->tran_hba_private	= mpt;
	hba_tran->tran_tgt_private	= NULL;

	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;

	hba_tran->tran_start		= mptsas_scsi_start;
	hba_tran->tran_reset		= mptsas_scsi_reset;
	hba_tran->tran_abort		= mptsas_scsi_abort;
	hba_tran->tran_getcap		= mptsas_scsi_getcap;
	hba_tran->tran_setcap		= mptsas_scsi_setcap;
	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;

	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;

	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
	hba_tran->tran_get_name		= mptsas_get_name;

	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
	hba_tran->tran_bus_reset	= NULL;

	/* event callbacks are not supported by this HBA */
	hba_tran->tran_add_eventcall	= NULL;
	hba_tran->tran_get_eventcookie	= NULL;
	hba_tran->tran_post_event	= NULL;
	hba_tran->tran_remove_eventcall	= NULL;

	hba_tran->tran_bus_config	= mptsas_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	/*
	 * All children of the HBA are iports. We need tran was cloned.
	 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
	 * inherited to iport's tran vector.
	 */
	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);

	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
	    hba_tran, tran_flags) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
		scsi_hba_tran_free(hba_tran);
		mpt->m_tran = NULL;
		return (FALSE);
	}
	return (TRUE);
}
2042
/*
 * Undo mptsas_hba_setup(): detach from the SCSA framework, then free
 * the transport structure if one was allocated.
 */
static void
mptsas_hba_teardown(mptsas_t *mpt)
{
	(void) scsi_hba_detach(mpt->m_dip);
	if (mpt->m_tran != NULL) {
		scsi_hba_tran_free(mpt->m_tran);
		mpt->m_tran = NULL;
	}
}
2052
/*
 * Register SCSA iports for this HBA.
 *
 * Phys with an attached device that report the same port number are
 * grouped into a single phymask; one iport is registered per group,
 * named by the mask printed in hex.  A virtual iport "v0" is always
 * registered as well for RAID volumes (which use phymask 0).
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int i, j;
	mptsas_phymask_t mask = 0x0;
	/*
	 * initial value of mask is 0
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* skip phys with nothing attached */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* already covered by an earlier phy's port group */
		if ((mask & (1 << i)) != 0)
			continue;

		/* collect every attached phy belonging to the same port */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		/* record the group mask on each member phy */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		/* iport name is the phymask in hex (fits in the buffer) */
		(void) sprintf(phy_mask_name, "%x", phy_mask);

		/*
		 * NOTE(review): m_mutex is dropped across the framework
		 * call -- presumably scsi_hba_iport_register() may block
		 * or re-enter the driver; confirm before restructuring.
		 */
		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2107
2108 static int
2109 mptsas_smp_setup(mptsas_t *mpt)
2110 {
2111 mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
2112 ASSERT(mpt->m_smptran != NULL);
2113 mpt->m_smptran->smp_tran_hba_private = mpt;
2114 mpt->m_smptran->smp_tran_start = mptsas_smp_start;
2115 if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
2116 mptsas_log(mpt, CE_WARN, "smp attach setup failed");
2117 smp_hba_tran_free(mpt->m_smptran);
2118 mpt->m_smptran = NULL;
2119 return (FALSE);
2120 }
2121 /*
2122 * Initialize smp hash table
2123 */
2124 mptsas_hash_init(&mpt->m_active->m_smptbl);
2125 mpt->m_smp_devhdl = 0xFFFF;
2126
2127 return (TRUE);
2128 }
2129
/*
 * Undo mptsas_smp_setup(): detach the SMP transport, free its tran
 * structure if present, and clear the SMP device handle.
 */
static void
mptsas_smp_teardown(mptsas_t *mpt)
{
	(void) smp_hba_detach(mpt->m_dip);
	if (mpt->m_smptran != NULL) {
		smp_hba_tran_free(mpt->m_smptran);
		mpt->m_smptran = NULL;
	}
	mpt->m_smp_devhdl = 0;
}
2140
2141 static int
2142 mptsas_cache_create(mptsas_t *mpt)
2143 {
2144 int instance = mpt->m_instance;
2145 char buf[64];
2146
2147 /*
2148 * create kmem cache for packets
2149 */
2150 (void) sprintf(buf, "mptsas%d_cache", instance);
2151 mpt->m_kmem_cache = kmem_cache_create(buf,
2152 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2153 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2154 NULL, (void *)mpt, NULL, 0);
2155
2156 if (mpt->m_kmem_cache == NULL) {
2157 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2158 return (FALSE);
2159 }
2160
2161 /*
2162 * create kmem cache for extra SGL frames if SGL cannot
2163 * be accomodated into main request frame.
2164 */
2165 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2166 mpt->m_cache_frames = kmem_cache_create(buf,
2167 sizeof (mptsas_cache_frames_t), 8,
2168 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2169 NULL, (void *)mpt, NULL, 0);
2170
2171 if (mpt->m_cache_frames == NULL) {
2172 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2173 return (FALSE);
2174 }
2175
2176 return (TRUE);
2177 }
2178
2179 static void
2180 mptsas_cache_destroy(mptsas_t *mpt)
2181 {
2182 /* deallocate in reverse order */
2183 if (mpt->m_cache_frames) {
2184 kmem_cache_destroy(mpt->m_cache_frames);
2185 mpt->m_cache_frames = NULL;
2186 }
2187 if (mpt->m_kmem_cache) {
2188 kmem_cache_destroy(mpt->m_kmem_cache);
2189 mpt->m_kmem_cache = NULL;
2190 }
2191 }
2192
/*
 * power(9E) entry point.
 *
 * Transitions the HBA between full power (PM_LEVEL_D0) and off
 * (PM_LEVEL_D3).  Power is only managed on the HBA node itself;
 * requests on iport children succeed without doing anything.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t *mpt;
	int rval = DDI_SUCCESS;
	int polls = 0;
	uint32_t ioc_status;

	/* iport nodes (non-NULL unit address) are not power managed */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset
		 * (3000 polls x 10ms delay per poll).
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		/*
		 * NOTE(review): m_power_level is updated under
		 * m_intr_mutex -- presumably readers in the interrupt
		 * path rely on that lock; not visible in this chunk.
		 */
		mutex_enter(&mpt->m_intr_mutex);
		mpt->m_power_level = PM_LEVEL_D0;
		mutex_exit(&mpt->m_intr_mutex);
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2266
2267 /*
2268 * Initialize configuration space and figure out which
2269 * chip and revison of the chip the mpt driver is using.
2270 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	/* Already mapped by an earlier call; nothing to do. */
	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.
	 * A latched parity error, if any, is cleared by writing the
	 * PERROR bit back to the status register.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	/* enable memory access, bus mastering, error reporting */
	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team. This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	/* record PCI capability offsets (best effort) */
	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}
2325
/*
 * Tear down config space access.  Bus mastering is disabled first so
 * the chip cannot DMA after the config handle is gone.
 */
static void
mptsas_config_space_fini(mptsas_t *mpt)
{
	if (mpt->m_config_handle != NULL) {
		mptsas_disable_bus_master(mpt);
		pci_config_teardown(&mpt->m_config_handle);
		mpt->m_config_handle = NULL;
	}
}
2335
2336 static void
2337 mptsas_setup_cmd_reg(mptsas_t *mpt)
2338 {
2339 ushort_t cmdreg;
2340
2341 /*
2342 * Set the command register to the needed values.
2343 */
2344 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2345 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2346 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2347 cmdreg &= ~PCI_COMM_IO;
2348 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2349 }
2350
2351 static void
2352 mptsas_disable_bus_master(mptsas_t *mpt)
2353 {
2354 ushort_t cmdreg;
2355
2356 /*
2357 * Clear the master enable bit in the PCI command register.
2358 * This prevents any bus mastering activity like DMA.
2359 */
2360 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2361 cmdreg &= ~PCI_COMM_ME;
2362 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2363 }
2364
2365 int
2366 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2367 {
2368 ddi_dma_attr_t attrs;
2369
2370 attrs = mpt->m_io_dma_attr;
2371 attrs.dma_attr_sgllen = 1;
2372
2373 ASSERT(dma_statep != NULL);
2374
2375 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2376 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2377 &dma_statep->cookie) == FALSE) {
2378 return (DDI_FAILURE);
2379 }
2380
2381 return (DDI_SUCCESS);
2382 }
2383
/*
 * Release the DMA resources obtained by mptsas_dma_alloc() and clear
 * the recorded size.
 */
void
mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
{
	ASSERT(dma_statep != NULL);
	mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
	dma_statep->size = 0;
}
2391
2392 int
2393 mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
2394 {
2395 ddi_dma_attr_t attrs;
2396 ddi_dma_handle_t dma_handle;
2397 caddr_t memp;
2398 ddi_acc_handle_t accessp;
2399 int rval;
2400
2401 ASSERT(mutex_owned(&mpt->m_mutex));
2402
2403 attrs = mpt->m_msg_dma_attr;
2404 attrs.dma_attr_sgllen = 1;
2405 attrs.dma_attr_granular = size;
2406
2407 if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
2408 &accessp, &memp, size, NULL) == FALSE) {
2409 return (DDI_FAILURE);
2410 }
2411
2412 rval = (*callback) (mpt, memp, var, accessp);
2413
2414 if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
2415 (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
2416 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
2417 rval = DDI_FAILURE;
2418 }
2419
2420 mptsas_dma_addr_destroy(&dma_handle, &accessp);
2421 return (rval);
2422
2423 }
2424
2425 static int
2426 mptsas_alloc_request_frames(mptsas_t *mpt)
2427 {
2428 ddi_dma_attr_t frame_dma_attrs;
2429 caddr_t memp;
2430 ddi_dma_cookie_t cookie;
2431 size_t mem_size;
2432
2433 /*
2434 * re-alloc when it has already alloced
2435 */
2436 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2437 &mpt->m_acc_req_frame_hdl);
2438
2439 /*
2440 * The size of the request frame pool is:
2441 * Number of Request Frames * Request Frame Size
2442 */
2443 mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2444
2445 /*
2446 * set the DMA attributes. System Request Message Frames must be
2447 * aligned on a 16-byte boundry.
2448 */
2449 frame_dma_attrs = mpt->m_msg_dma_attr;
2450 frame_dma_attrs.dma_attr_align = 16;
2451 frame_dma_attrs.dma_attr_sgllen = 1;
2452
2453 /*
2454 * allocate the request frame pool.
2455 */
2456 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2457 &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2458 mem_size, &cookie) == FALSE) {
2459 return (DDI_FAILURE);
2460 }
2461
2462 /*
2463 * Store the request frame memory address. This chip uses this
2464 * address to dma to and from the driver's frame. The second
2465 * address is the address mpt uses to fill in the frame.
2466 */
2467 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2468 mpt->m_req_frame = memp;
2469
2470 /*
2471 * Clear the request frame pool.
2472 */
2473 bzero(mpt->m_req_frame, mem_size);
2474
2475 return (DDI_SUCCESS);
2476 }
2477
2478 static int
2479 mptsas_alloc_reply_frames(mptsas_t *mpt)
2480 {
2481 ddi_dma_attr_t frame_dma_attrs;
2482 caddr_t memp;
2483 ddi_dma_cookie_t cookie;
2484 size_t mem_size;
2485
2486 /*
2487 * re-alloc when it has already alloced
2488 */
2489 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2490 &mpt->m_acc_reply_frame_hdl);
2491
2492 /*
2493 * The size of the reply frame pool is:
2494 * Number of Reply Frames * Reply Frame Size
2495 */
2496 mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2497
2498 /*
2499 * set the DMA attributes. System Reply Message Frames must be
2500 * aligned on a 4-byte boundry. This is the default.
2501 */
2502 frame_dma_attrs = mpt->m_msg_dma_attr;
2503 frame_dma_attrs.dma_attr_sgllen = 1;
2504
2505 /*
2506 * allocate the reply frame pool
2507 */
2508 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2509 &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2510 mem_size, &cookie) == FALSE) {
2511 return (DDI_FAILURE);
2512 }
2513
2514 /*
2515 * Store the reply frame memory address. This chip uses this
2516 * address to dma to and from the driver's frame. The second
2517 * address is the address mpt uses to process the frame.
2518 */
2519 mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2520 mpt->m_reply_frame = memp;
2521
2522 /*
2523 * Clear the reply frame pool.
2524 */
2525 bzero(mpt->m_reply_frame, mem_size);
2526
2527 return (DDI_SUCCESS);
2528 }
2529
2530 static int
2531 mptsas_alloc_free_queue(mptsas_t *mpt)
2532 {
2533 ddi_dma_attr_t frame_dma_attrs;
2534 caddr_t memp;
2535 ddi_dma_cookie_t cookie;
2536 size_t mem_size;
2537
2538 /*
2539 * re-alloc when it has already alloced
2540 */
2541 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2542 &mpt->m_acc_free_queue_hdl);
2543
2544 /*
2545 * The reply free queue size is:
2546 * Reply Free Queue Depth * 4
2547 * The "4" is the size of one 32 bit address (low part of 64-bit
2548 * address)
2549 */
2550 mem_size = mpt->m_free_queue_depth * 4;
2551
2552 /*
2553 * set the DMA attributes The Reply Free Queue must be aligned on a
2554 * 16-byte boundry.
2555 */
2556 frame_dma_attrs = mpt->m_msg_dma_attr;
2557 frame_dma_attrs.dma_attr_align = 16;
2558 frame_dma_attrs.dma_attr_sgllen = 1;
2559
2560 /*
2561 * allocate the reply free queue
2562 */
2563 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2564 &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2565 mem_size, &cookie) == FALSE) {
2566 return (DDI_FAILURE);
2567 }
2568
2569 /*
2570 * Store the reply free queue memory address. This chip uses this
2571 * address to read from the reply free queue. The second address
2572 * is the address mpt uses to manage the queue.
2573 */
2574 mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2575 mpt->m_free_queue = memp;
2576
2577 /*
2578 * Clear the reply free queue memory.
2579 */
2580 bzero(mpt->m_free_queue, mem_size);
2581
2582 return (DDI_SUCCESS);
2583 }
2584
2585 static int
2586 mptsas_alloc_post_queue(mptsas_t *mpt)
2587 {
2588 ddi_dma_attr_t frame_dma_attrs;
2589 caddr_t memp;
2590 ddi_dma_cookie_t cookie;
2591 size_t mem_size;
2592
2593 /*
2594 * re-alloc when it has already alloced
2595 */
2596 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2597 &mpt->m_acc_post_queue_hdl);
2598
2599 /*
2600 * The reply descriptor post queue size is:
2601 * Reply Descriptor Post Queue Depth * 8
2602 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2603 */
2604 mem_size = mpt->m_post_queue_depth * 8;
2605
2606 /*
2607 * set the DMA attributes. The Reply Descriptor Post Queue must be
2608 * aligned on a 16-byte boundry.
2609 */
2610 frame_dma_attrs = mpt->m_msg_dma_attr;
2611 frame_dma_attrs.dma_attr_align = 16;
2612 frame_dma_attrs.dma_attr_sgllen = 1;
2613
2614 /*
2615 * allocate the reply post queue
2616 */
2617 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2618 &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2619 mem_size, &cookie) == FALSE) {
2620 return (DDI_FAILURE);
2621 }
2622
2623 /*
2624 * Store the reply descriptor post queue memory address. This chip
2625 * uses this address to write to the reply descriptor post queue. The
2626 * second address is the address mpt uses to manage the queue.
2627 */
2628 mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2629 mpt->m_post_queue = memp;
2630
2631 /*
2632 * Clear the reply post queue memory.
2633 */
2634 bzero(mpt->m_post_queue, mem_size);
2635
2636 return (DDI_SUCCESS);
2637 }
2638
/*
 * (Re)allocate the zero-filled array of reply handler arguments, one
 * entry per possible reply frame.
 *
 * NOTE(review): the kmem_free() size is computed from the *current*
 * m_max_replies.  If m_max_replies can change between the original
 * allocation and a later call (e.g. after IOC facts are re-read), the
 * free size would not match the allocation size -- confirm that
 * m_max_replies is stable across calls to this function.
 */
static void
mptsas_alloc_reply_args(mptsas_t *mpt)
{
	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
		mpt->m_replyh_args = NULL;
	}
	mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
	    mpt->m_max_replies, KM_SLEEP);
}
2650
2651 static int
2652 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2653 {
2654 mptsas_cache_frames_t *frames = NULL;
2655 if (cmd->cmd_extra_frames == NULL) {
2656 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2657 if (frames == NULL) {
2658 return (DDI_FAILURE);
2659 }
2660 cmd->cmd_extra_frames = frames;
2661 }
2662 return (DDI_SUCCESS);
2663 }
2664
2665 static void
2666 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2667 {
2668 if (cmd->cmd_extra_frames) {
2669 kmem_cache_free(mpt->m_cache_frames,
2670 (void *)cmd->cmd_extra_frames);
2671 cmd->cmd_extra_frames = NULL;
2672 }
2673 }
2674
/*
 * Unmap the device register space (m_datap) mapped at configuration.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2681
/*
 * Free all HBA message-passing resources: the request and reply frame
 * pools, the reply free and post queues, and the reply handler
 * argument array.
 */
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Free up any allocated memory
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}
2707
2708 static int
2709 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
2710 {
2711 int lun = 0;
2712 char *sas_wwn = NULL;
2713 int phynum = -1;
2714 int reallen = 0;
2715
2716 /* Get the target num */
2717 lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2718 LUN_PROP, 0);
2719
2720 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
2721 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
2722 /*
2723 * Stick in the address of form "pPHY,LUN"
2724 */
2725 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
2726 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
2727 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
2728 == DDI_PROP_SUCCESS) {
2729 /*
2730 * Stick in the address of the form "wWWN,LUN"
2731 */
2732 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
2733 ddi_prop_free(sas_wwn);
2734 } else {
2735 return (DDI_FAILURE);
2736 }
2737
2738 ASSERT(reallen < len);
2739 if (reallen >= len) {
2740 mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
2741 "length too small, it needs to be %d bytes", reallen + 1);
2742 }
2743 return (DDI_SUCCESS);
2744 }
2745
2746 /*
2747 * tran_tgt_init(9E) - target device instance initialization
2748 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * Use this function to allocate target-private data structures,
	 * if needed by this HBA.  Add revised flow-control and queue
	 * properties for child here, if desired and if you can tell they
	 * support tagged queueing by now.
	 */
	mptsas_t *mpt;
	int lun = sd->sd_address.a_lun;
	mdi_pathinfo_t *pip = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	mptsas_target_t *ptgt = NULL;
	char *psas_wwn = NULL;
	int phymask = 0;
	uint64_t sas_wwn = 0;
	mpt = SDEV2MPT(sd);

	/* targets hang off iports, never off the HBA node itself */
	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	/*
	 * Driver-generated (.conf) nodes are merged into the matching
	 * persistent node and the temporary node itself is failed.
	 */
	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		/* mpxio client: read LUN and WWN from the pathinfo node */
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs. Somehow scsi_vhci has
			 * lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/* non-mpxio: read the same properties off the devinfo node */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}
	ASSERT((sas_wwn != 0) || (phymask != 0));
	mutex_enter(&mpt->m_mutex);
	/* find the target by WWN (or by phymask for the RAID port) */
	ptgt = mptsas_hash_search(&mpt->m_active->m_tgttbl, sas_wwn, phymask);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	if (hba_tran->tran_tgt_private == NULL) {
		/* attach per-target private data to the cloned tran */
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	/* mpxio clients are done; inquiry fixups below are phys-path only */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;
		int i;

		mutex_exit(&mpt->m_mutex);
		/*
		 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			return (DDI_SUCCESS);
		}
		sid = (void *)(&inq89[60]);

		/* byte-swap the IDENTIFY data strings into model/fw */
		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		/*
		 * split model into into vid/pid
		 */
		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
			if ((*pid == ' ') || (*pid == '\t'))
				break;
		if (i < SATA_ID_MODEL_LEN) {
			vid = model;
			/*
			 * terminate vid, establish pid
			 */
			*pid++ = 0;
		} else {
			/*
			 * vid will stay "ATA ", the rule is same
			 * as sata framework implementation.
			 */
			vid = NULL;
			/*
			 * model is all pid
			 */
			pid = model;
		}

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
2935 /*
2936 * tran_tgt_free(9E) - target device instance deallocation
2937 */
2938 static void
2939 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2940 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2941 {
2942 #ifndef __lock_lint
2943 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
2944 #endif
2945
2946 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
2947
2948 if (tgt_private != NULL) {
2949 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
2950 hba_tran->tran_tgt_private = NULL;
2951 }
2952 }
2953
2954 /*
2955 * scsi_pkt handling
2956 *
2957 * Visible to the external world via the transport structure.
2958 */
2959
2960 /*
2961 * Notes:
2962 * - transport the command to the addressed SCSI target/lun device
2963 * - normal operation is to schedule the command to be transported,
2964 * and return TRAN_ACCEPT if this is successful.
2965 * - if NO_INTR, tran_start must poll device for command completion
2966 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t *mpt = PKT2MPT(pkt);
	mptsas_cmd_t *cmd = PKT2CMD(pkt);
	int rval;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	/* non-DEBUG safety net for a command without a target */
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 */

	/* stall or fail I/O while the target is mid device-removal */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
			/*
			 * commands should be allowed to retry by
			 * returning TRAN_BUSY to stall the I/O's
			 * which come from scsi_vhci since the device/
			 * path is in unstable state now.
			 */
			mutex_exit(&ptgt->m_tgt_intr_mutex);
			return (TRAN_BUSY);
		} else {
			/*
			 * The device is offline, just fail the
			 * command by returning TRAN_FATAL_ERROR.
			 */
			mutex_exit(&ptgt->m_tgt_intr_mutex);
			return (TRAN_FATAL_ERROR);
		}
	}
	mutex_exit(&ptgt->m_tgt_intr_mutex);
	rval = mptsas_accept_pkt(mpt, cmd);

	return (rval);
}
3034
/*
 * Accept a prepared command: either start it immediately when the
 * target has throttle headroom, or place it on the HBA wait queue.
 * Commands for an invalidated device handle are completed with
 * CMD_DEV_GONE (or CMD_RESET/TRAN_BUSY while the HBA is resetting).
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int rval = TRAN_ACCEPT;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	/* prepare the pkt if it arrived here unprepared */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If device handle has already been invalidated, just
	 * fail the command. In theory, command from scsi_vhci
	 * client is impossible send down command with invalid
	 * devhdl since devhdl is set after path offline, target
	 * driver is not suppose to select a offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, it might because invalid devhdl "
		    "request."));
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		mutex_enter(&mpt->m_mutex);
		/*
		 * If HBA is being reset, the DevHandles are being
		 * re-initialized, which means that they could be invalid
		 * even if the target is still attached. Check if being reset
		 * and if DevHandle is being re-initialized. If this is the
		 * case, return BUSY so the I/O can be retried later.
		 */
		if (mpt->m_in_reset) {
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			/* TXQ commands must complete through the done queue */
			if (cmd->cmd_flags & CFLAG_TXQ) {
				mptsas_doneq_add(mpt, cmd);
				mptsas_doneq_empty(mpt);
				mutex_exit(&mpt->m_mutex);
				return (rval);
			} else {
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			}
		}
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			mutex_exit(&mpt->m_mutex);
			return (rval);
		} else {
			mutex_exit(&mpt->m_mutex);
			return (TRAN_FATAL_ERROR);
		}
	}
	mutex_exit(&ptgt->m_tgt_intr_mutex);
	/*
	 * The first case is the normal case. mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if ((ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd0(mpt, cmd);
		} else {
			/* no free slot available; queue for later */
			mutex_enter(&mpt->m_mutex);
			mptsas_waitq_add(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		mutex_enter(&mpt->m_mutex);
		mptsas_waitq_add(mpt, cmd);

		/* polled commands must be completed before returning */
		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd. For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
		mutex_exit(&mpt->m_mutex);
	}
	return (rval);
}
3150
/*
 * mptsas_save_cmd: reserve a free request slot (SMID) for a command.
 *
 * Free slots are kept in per-CPU pairs of lock-protected lists: an
 * alloc queue that this routine pops from, and a release queue that
 * command completion pushes onto.  When the alloc queue of the chosen
 * pair runs dry, the release queue's contents are spliced over in
 * place; when that is empty too, the next pair is tried.
 *
 * Returns TRUE with cmd->cmd_slot set on success, FALSE when every
 * queue pair is exhausted (no free SMID available right now).
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t *slots;
	int slot;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
	mptsas_slot_free_e_t *pe;
	int qn, qn_first;

	slots = mpt->m_active;

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));

	/*
	 * Hash the current CPU onto a queue pair; m_slot_freeq_pair_n is
	 * a power of two, so the mask picks a pair index.
	 */
	qn = qn_first = CPU->cpu_seqid & (mpt->m_slot_freeq_pair_n - 1);

qpair_retry:
	ASSERT(qn < mpt->m_slot_freeq_pair_n);
	mutex_enter(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_mutex);
	pe = list_head(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.
	    s.m_fq_list);
	if (!pe) { /* switch the allocq and releq */
		mutex_enter(&mpt->m_slot_freeq_pairp[qn].m_slot_releq.
		    s.m_fq_mutex);
		if (mpt->m_slot_freeq_pairp[qn].m_slot_releq.s.m_fq_n) {
			/*
			 * The release queue has entries: move its whole
			 * list onto the (empty) alloc queue by hand,
			 * re-linking both list heads, then reset the
			 * release queue to empty.
			 */
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_n =
			    mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_n;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_list.list_head.list_next =
			    mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_next;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_list.list_head.list_prev =
			    mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_prev;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_prev->
			    list_next =
			    &mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_list.list_head;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_next->
			    list_prev =
			    &mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_list.list_head;

			mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_next =
			    mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_prev =
			    &mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_n = 0;
		} else {
			/*
			 * Both queues of this pair are empty; drop the
			 * locks and try the next pair.  If we come all
			 * the way back around, no slot is free.
			 */
			mutex_exit(&mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_mutex);
			mutex_exit(&mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_mutex);
			qn = (qn + 1) & (mpt->m_slot_freeq_pair_n - 1);
			if (qn == qn_first)
				return (FALSE);
			else
				goto qpair_retry;
		}
		mutex_exit(&mpt->m_slot_freeq_pairp[qn].
		    m_slot_releq.s.m_fq_mutex);
		pe = list_head(&mpt->m_slot_freeq_pairp[qn].
		    m_slot_allocq.s.m_fq_list);
		ASSERT(pe);
	}
	list_remove(&mpt->m_slot_freeq_pairp[qn].
	    m_slot_allocq.s.m_fq_list, pe);
	slot = pe->slot;
	/*
	 * Make sure SMID is not using reserved value of 0
	 * and the TM request slot.
	 */
	ASSERT((slot > 0) && (slot <= slots->m_n_slots) &&
	    mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n > 0);
	cmd->cmd_slot = slot;
	mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n--;
	ASSERT(mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n >= 0);

	mutex_exit(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_mutex);
	/*
	 * only increment per target ncmds if this is not a
	 * command that has no target associated with it (i.e. an
	 * event acknowledgment)
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		ptgt->m_t_ncmds++;
		mutex_exit(&ptgt->m_tgt_intr_mutex);
	}
	cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;

	/*
	 * If initial timeout is less than or equal to one tick, bump
	 * the timeout by a tick so that command doesn't timeout before
	 * its allotted time.
	 */
	if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
		cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
	}
	return (TRUE);
}
3262
3263 /*
3264 * prepare the pkt:
3265 * the pkt may have been resubmitted or just reused so
3266 * initialize some fields and do some checks.
3267 */
3268 static int
3269 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3270 {
3271 struct scsi_pkt *pkt = CMD2PKT(cmd);
3272
3273 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3274
3275 /*
3276 * Reinitialize some fields that need it; the packet may
3277 * have been resubmitted
3278 */
3279 pkt->pkt_reason = CMD_CMPLT;
3280 pkt->pkt_state = 0;
3281 pkt->pkt_statistics = 0;
3282 pkt->pkt_resid = 0;
3283 cmd->cmd_age = 0;
3284 cmd->cmd_pkt_flags = pkt->pkt_flags;
3285
3286 /*
3287 * zero status byte.
3288 */
3289 *(pkt->pkt_scbp) = 0;
3290
3291 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3292 pkt->pkt_resid = cmd->cmd_dmacount;
3293
3294 /*
3295 * consistent packets need to be sync'ed first
3296 * (only for data going out)
3297 */
3298 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3299 (cmd->cmd_flags & CFLAG_DMASEND)) {
3300 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3301 DDI_DMA_SYNC_FORDEV);
3302 }
3303 }
3304
3305 cmd->cmd_flags =
3306 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3307 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3308
3309 return (TRAN_ACCEPT);
3310 }
3311
3312 /*
3313 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3314 *
3315 * One of three possibilities:
3316 * - allocate scsi_pkt
3317 * - allocate scsi_pkt and DMA resources
3318 * - allocate DMA resources to an already-allocated pkt
3319 */
3320 static struct scsi_pkt *
3321 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3322 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3323 int (*callback)(), caddr_t arg)
3324 {
3325 mptsas_cmd_t *cmd, *new_cmd;
3326 mptsas_t *mpt = ADDR2MPT(ap);
3327 int failure = 1;
3328 #ifndef __sparc
3329 uint_t oldcookiec;
3330 #endif /* __sparc */
3331 mptsas_target_t *ptgt = NULL;
3332 int rval;
3333 mptsas_tgt_private_t *tgt_private;
3334 int kf;
3335
3336 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3337
3338 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3339 tran_tgt_private;
3340 ASSERT(tgt_private != NULL);
3341 if (tgt_private == NULL) {
3342 return (NULL);
3343 }
3344 ptgt = tgt_private->t_private;
3345 ASSERT(ptgt != NULL);
3346 if (ptgt == NULL)
3347 return (NULL);
3348 ap->a_target = ptgt->m_devhdl;
3349 ap->a_lun = tgt_private->t_lun;
3350
3351 ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3352 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3353 statuslen *= 100; tgtlen *= 4;
3354 #endif
3355 NDBG3(("mptsas_scsi_init_pkt:\n"
3356 "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3357 ap->a_target, (void *)pkt, (void *)bp,
3358 cmdlen, statuslen, tgtlen, flags));
3359
3360 /*
3361 * Allocate the new packet.
3362 */
3363 if (pkt == NULL) {
3364 ddi_dma_handle_t save_dma_handle;
3365 ddi_dma_handle_t save_arq_dma_handle;
3366 struct buf *save_arq_bp;
3367 ddi_dma_cookie_t save_arqcookie;
3368 #ifdef __sparc
3369 mptti_t *save_sg;
3370 #endif /* __sparc */
3371
3372 cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3373
3374 if (cmd) {
3375 save_dma_handle = cmd->cmd_dmahandle;
3376 save_arq_dma_handle = cmd->cmd_arqhandle;
3377 save_arq_bp = cmd->cmd_arq_buf;
3378 save_arqcookie = cmd->cmd_arqcookie;
3379 #ifdef __sparc
3380 save_sg = cmd->cmd_sg;
3381 #endif /* __sparc */
3382 bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3383 cmd->cmd_dmahandle = save_dma_handle;
3384 cmd->cmd_arqhandle = save_arq_dma_handle;
3385 cmd->cmd_arq_buf = save_arq_bp;
3386 cmd->cmd_arqcookie = save_arqcookie;
3387 #ifdef __sparc
3388 cmd->cmd_sg = save_sg;
3389 #endif /* __sparc */
3390 pkt = (void *)((uchar_t *)cmd +
3391 sizeof (struct mptsas_cmd));
3392 pkt->pkt_ha_private = (opaque_t)cmd;
3393 pkt->pkt_address = *ap;
3394 pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3395 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3396 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3397 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3398 cmd->cmd_cdblen = (uchar_t)cmdlen;
3399 cmd->cmd_scblen = statuslen;
3400 cmd->cmd_rqslen = SENSE_LENGTH;
3401 cmd->cmd_tgt_addr = ptgt;
3402 failure = 0;
3403 }
3404
3405 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3406 (tgtlen > PKT_PRIV_LEN) ||
3407 (statuslen > EXTCMDS_STATUS_SIZE)) {
3408 if (failure == 0) {
3409 /*
3410 * if extern alloc fails, all will be
3411 * deallocated, including cmd
3412 */
3413 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3414 cmdlen, tgtlen, statuslen, kf);
3415 }
3416 if (failure) {
3417 /*
3418 * if extern allocation fails, it will
3419 * deallocate the new pkt as well
3420 */
3421 return (NULL);
3422 }
3423 }
3424 new_cmd = cmd;
3425
3426 } else {
3427 cmd = PKT2CMD(pkt);
3428 new_cmd = NULL;
3429 }
3430
3431
3432 #ifndef __sparc
3433 /* grab cmd->cmd_cookiec here as oldcookiec */
3434
3435 oldcookiec = cmd->cmd_cookiec;
3436 #endif /* __sparc */
3437
3438 /*
3439 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3440 * greater than 0 and we'll need to grab the next dma window
3441 */
3442 /*
3443 * SLM-not doing extra command frame right now; may add later
3444 */
3445
3446 if (cmd->cmd_nwin > 0) {
3447
3448 /*
3449 * Make sure we havn't gone past the the total number
3450 * of windows
3451 */
3452 if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3453 return (NULL);
3454 }
3455 if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3456 &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3457 &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3458 return (NULL);
3459 }
3460 goto get_dma_cookies;
3461 }
3462
3463
3464 if (flags & PKT_XARQ) {
3465 cmd->cmd_flags |= CFLAG_XARQ;
3466 }
3467
3468 /*
3469 * DMA resource allocation. This version assumes your
3470 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3471 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3472 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3473 */
3474 if (bp && (bp->b_bcount != 0) &&
3475 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3476
3477 int cnt, dma_flags;
3478 mptti_t *dmap; /* ptr to the S/G list */
3479
3480 /*
3481 * Set up DMA memory and position to the next DMA segment.
3482 */
3483 ASSERT(cmd->cmd_dmahandle != NULL);
3484
3485 if (bp->b_flags & B_READ) {
3486 dma_flags = DDI_DMA_READ;
3487 cmd->cmd_flags &= ~CFLAG_DMASEND;
3488 } else {
3489 dma_flags = DDI_DMA_WRITE;
3490 cmd->cmd_flags |= CFLAG_DMASEND;
3491 }
3492 if (flags & PKT_CONSISTENT) {
3493 cmd->cmd_flags |= CFLAG_CMDIOPB;
3494 dma_flags |= DDI_DMA_CONSISTENT;
3495 }
3496
3497 if (flags & PKT_DMA_PARTIAL) {
3498 dma_flags |= DDI_DMA_PARTIAL;
3499 }
3500
3501 /*
3502 * workaround for byte hole issue on psycho and
3503 * schizo pre 2.1
3504 */
3505 if ((bp->b_flags & B_READ) && ((bp->b_flags &
3506 (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3507 ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3508 dma_flags |= DDI_DMA_CONSISTENT;
3509 }
3510
3511 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3512 dma_flags, callback, arg,
3513 &cmd->cmd_cookie, &cmd->cmd_cookiec);
3514 if (rval == DDI_DMA_PARTIAL_MAP) {
3515 (void) ddi_dma_numwin(cmd->cmd_dmahandle,
3516 &cmd->cmd_nwin);
3517 cmd->cmd_winindex = 0;
3518 (void) ddi_dma_getwin(cmd->cmd_dmahandle,
3519 cmd->cmd_winindex, &cmd->cmd_dma_offset,
3520 &cmd->cmd_dma_len, &cmd->cmd_cookie,
3521 &cmd->cmd_cookiec);
3522 } else if (rval && (rval != DDI_DMA_MAPPED)) {
3523 switch (rval) {
3524 case DDI_DMA_NORESOURCES:
3525 bioerror(bp, 0);
3526 break;
3527 case DDI_DMA_BADATTR:
3528 case DDI_DMA_NOMAPPING:
3529 bioerror(bp, EFAULT);
3530 break;
3531 case DDI_DMA_TOOBIG:
3532 default:
3533 bioerror(bp, EINVAL);
3534 break;
3535 }
3536 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3537 if (new_cmd) {
3538 mptsas_scsi_destroy_pkt(ap, pkt);
3539 }
3540 return ((struct scsi_pkt *)NULL);
3541 }
3542
3543 get_dma_cookies:
3544 cmd->cmd_flags |= CFLAG_DMAVALID;
3545 ASSERT(cmd->cmd_cookiec > 0);
3546
3547 if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3548 mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3549 cmd->cmd_cookiec);
3550 bioerror(bp, EINVAL);
3551 if (new_cmd) {
3552 mptsas_scsi_destroy_pkt(ap, pkt);
3553 }
3554 return ((struct scsi_pkt *)NULL);
3555 }
3556
3557 /*
3558 * Allocate extra SGL buffer if needed.
3559 */
3560 if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3561 (cmd->cmd_extra_frames == NULL)) {
3562 if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3563 DDI_FAILURE) {
3564 mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3565 "failed");
3566 bioerror(bp, ENOMEM);
3567 if (new_cmd) {
3568 mptsas_scsi_destroy_pkt(ap, pkt);
3569 }
3570 return ((struct scsi_pkt *)NULL);
3571 }
3572 }
3573
3574 /*
3575 * Always use scatter-gather transfer
3576 * Use the loop below to store physical addresses of
3577 * DMA segments, from the DMA cookies, into your HBA's
3578 * scatter-gather list.
3579 * We need to ensure we have enough kmem alloc'd
3580 * for the sg entries since we are no longer using an
3581 * array inside mptsas_cmd_t.
3582 *
3583 * We check cmd->cmd_cookiec against oldcookiec so
3584 * the scatter-gather list is correctly allocated
3585 */
3586 #ifndef __sparc
3587 if (oldcookiec != cmd->cmd_cookiec) {
3588 if (cmd->cmd_sg != (mptti_t *)NULL) {
3589 kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3590 oldcookiec);
3591 cmd->cmd_sg = NULL;
3592 }
3593 }
3594
3595 if (cmd->cmd_sg == (mptti_t *)NULL) {
3596 cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3597 cmd->cmd_cookiec), kf);
3598
3599 if (cmd->cmd_sg == (mptti_t *)NULL) {
3600 mptsas_log(mpt, CE_WARN,
3601 "unable to kmem_alloc enough memory "
3602 "for scatter/gather list");
3603 /*
3604 * if we have an ENOMEM condition we need to behave
3605 * the same way as the rest of this routine
3606 */
3607
3608 bioerror(bp, ENOMEM);
3609 if (new_cmd) {
3610 mptsas_scsi_destroy_pkt(ap, pkt);
3611 }
3612 return ((struct scsi_pkt *)NULL);
3613 }
3614 }
3615 #endif /* __sparc */
3616 dmap = cmd->cmd_sg;
3617
3618 ASSERT(cmd->cmd_cookie.dmac_size != 0);
3619
3620 /*
3621 * store the first segment into the S/G list
3622 */
3623 dmap->count = cmd->cmd_cookie.dmac_size;
3624 dmap->addr.address64.Low = (uint32_t)
3625 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3626 dmap->addr.address64.High = (uint32_t)
3627 (cmd->cmd_cookie.dmac_laddress >> 32);
3628
3629 /*
3630 * dmacount counts the size of the dma for this window
3631 * (if partial dma is being used). totaldmacount
3632 * keeps track of the total amount of dma we have
3633 * transferred for all the windows (needed to calculate
3634 * the resid value below).
3635 */
3636 cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
3637 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3638
3639 /*
3640 * We already stored the first DMA scatter gather segment,
3641 * start at 1 if we need to store more.
3642 */
3643 for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
3644 /*
3645 * Get next DMA cookie
3646 */
3647 ddi_dma_nextcookie(cmd->cmd_dmahandle,
3648 &cmd->cmd_cookie);
3649 dmap++;
3650
3651 cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
3652 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3653
3654 /*
3655 * store the segment parms into the S/G list
3656 */
3657 dmap->count = cmd->cmd_cookie.dmac_size;
3658 dmap->addr.address64.Low = (uint32_t)
3659 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3660 dmap->addr.address64.High = (uint32_t)
3661 (cmd->cmd_cookie.dmac_laddress >> 32);
3662 }
3663
3664 /*
3665 * If this was partially allocated we set the resid
3666 * the amount of data NOT transferred in this window
3667 * If there is only one window, the resid will be 0
3668 */
3669 pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
3670 NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
3671 }
3672 return (pkt);
3673 }
3674
3675 /*
3676 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3677 *
3678 * Notes:
3679 * - also frees DMA resources if allocated
3680 * - implicit DMA synchonization
3681 */
3682 static void
3683 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3684 {
3685 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3686 mptsas_t *mpt = ADDR2MPT(ap);
3687
3688 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3689 ap->a_target, (void *)pkt));
3690
3691 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3692 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3693 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3694 }
3695 #ifndef __sparc
3696 if (cmd->cmd_sg) {
3697 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3698 cmd->cmd_sg = NULL;
3699 }
3700 #endif /* __sparc */
3701 mptsas_free_extra_sgl_frame(mpt, cmd);
3702
3703 if ((cmd->cmd_flags &
3704 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3705 CFLAG_SCBEXTERN)) == 0) {
3706 cmd->cmd_flags = CFLAG_FREE;
3707 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3708 } else {
3709 mptsas_pkt_destroy_extern(mpt, cmd);
3710 }
3711 }
3712
3713 /*
3714 * kmem cache constructor and destructor:
3715 * When constructing, we bzero the cmd and allocate the dma handle
3716 * When destructing, just free the dma handle
3717 */
3718 static int
3719 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3720 {
3721 mptsas_cmd_t *cmd = buf;
3722 mptsas_t *mpt = cdrarg;
3723 struct scsi_address ap;
3724 uint_t cookiec;
3725 ddi_dma_attr_t arq_dma_attr;
3726 int (*callback)(caddr_t);
3727
3728 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3729
3730 NDBG4(("mptsas_kmem_cache_constructor"));
3731
3732 ap.a_hba_tran = mpt->m_tran;
3733 ap.a_target = 0;
3734 ap.a_lun = 0;
3735
3736 /*
3737 * allocate a dma handle
3738 */
3739 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
3740 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
3741 cmd->cmd_dmahandle = NULL;
3742 return (-1);
3743 }
3744
3745 cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
3746 SENSE_LENGTH, B_READ, callback, NULL);
3747 if (cmd->cmd_arq_buf == NULL) {
3748 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3749 cmd->cmd_dmahandle = NULL;
3750 return (-1);
3751 }
3752
3753 /*
3754 * allocate a arq handle
3755 */
3756 arq_dma_attr = mpt->m_msg_dma_attr;
3757 arq_dma_attr.dma_attr_sgllen = 1;
3758 if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3759 NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3760 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3761 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3762 cmd->cmd_dmahandle = NULL;
3763 cmd->cmd_arqhandle = NULL;
3764 return (-1);
3765 }
3766
3767 if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
3768 cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3769 callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3770 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3771 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3772 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3773 cmd->cmd_dmahandle = NULL;
3774 cmd->cmd_arqhandle = NULL;
3775 cmd->cmd_arq_buf = NULL;
3776 return (-1);
3777 }
3778 /*
3779 * In sparc, the sgl length in most of the cases would be 1, so we
3780 * pre-allocate it in cache. On x86, the max number would be 256,
3781 * pre-allocate a maximum would waste a lot of memory especially
3782 * when many cmds are put onto waitq.
3783 */
3784 #ifdef __sparc
3785 cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3786 MPTSAS_MAX_CMD_SEGS), KM_SLEEP);
3787 #endif /* __sparc */
3788
3789 return (0);
3790 }
3791
3792 static void
3793 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3794 {
3795 #ifndef __lock_lint
3796 _NOTE(ARGUNUSED(cdrarg))
3797 #endif
3798 mptsas_cmd_t *cmd = buf;
3799
3800 NDBG4(("mptsas_kmem_cache_destructor"));
3801
3802 if (cmd->cmd_arqhandle) {
3803 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3804 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3805 cmd->cmd_arqhandle = NULL;
3806 }
3807 if (cmd->cmd_arq_buf) {
3808 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3809 cmd->cmd_arq_buf = NULL;
3810 }
3811 if (cmd->cmd_dmahandle) {
3812 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3813 cmd->cmd_dmahandle = NULL;
3814 }
3815 #ifdef __sparc
3816 if (cmd->cmd_sg) {
3817 kmem_free(cmd->cmd_sg, sizeof (mptti_t)* MPTSAS_MAX_CMD_SEGS);
3818 cmd->cmd_sg = NULL;
3819 }
3820 #endif /* __sparc */
3821 }
3822
/*
 * kmem cache constructor for extra-SGL frame buffers: allocate a block
 * of (m_max_request_frames - 1) request frames of consistent DMA
 * memory, bind it (single segment, 16-byte aligned) and record both
 * the device-visible DMA address and the kernel virtual address.
 * Returns DDI_SUCCESS/DDI_FAILURE; failure paths unwind in order.
 */
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t	*p = buf;
	mptsas_t		*mpt = cdrarg;
	ddi_dma_attr_t		frame_dma_attr;
	size_t			mem_size, alloc_len;
	ddi_dma_cookie_t	cookie;
	uint_t			ncookie;
	int			(*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/* one contiguous segment, aligned for the controller's frames */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	/*
	 * The main frame is allocated elsewhere, so this buffer only
	 * needs room for the remaining (m_max_request_frames - 1) frames.
	 */
	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address. This chip uses this
	 * address to dma to and from the driver. The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_address;

	return (DDI_SUCCESS);
}
3878
3879 static void
3880 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
3881 {
3882 #ifndef __lock_lint
3883 _NOTE(ARGUNUSED(cdrarg))
3884 #endif
3885 mptsas_cache_frames_t *p = buf;
3886 if (p->m_dma_hdl != NULL) {
3887 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
3888 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3889 ddi_dma_free_handle(&p->m_dma_hdl);
3890 p->m_phys_addr = NULL;
3891 p->m_frames_addr = NULL;
3892 p->m_dma_hdl = NULL;
3893 p->m_acc_hdl = NULL;
3894 }
3895
3896 }
3897
3898 /*
3899 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
3900 * for non-standard length cdb, pkt_private, status areas
3901 * if allocation fails, then deallocate all external space and the pkt
3902 */
3903 /* ARGSUSED */
3904 static int
3905 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
3906 int cmdlen, int tgtlen, int statuslen, int kf)
3907 {
3908 caddr_t cdbp, scbp, tgt;
3909 int (*callback)(caddr_t) = (kf == KM_SLEEP) ?
3910 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
3911 struct scsi_address ap;
3912 size_t senselength;
3913 ddi_dma_attr_t ext_arq_dma_attr;
3914 uint_t cookiec;
3915
3916 NDBG3(("mptsas_pkt_alloc_extern: "
3917 "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
3918 (void *)cmd, cmdlen, tgtlen, statuslen, kf));
3919
3920 tgt = cdbp = scbp = NULL;
3921 cmd->cmd_scblen = statuslen;
3922 cmd->cmd_privlen = (uchar_t)tgtlen;
3923
3924 if (cmdlen > sizeof (cmd->cmd_cdb)) {
3925 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
3926 goto fail;
3927 }
3928 cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
3929 cmd->cmd_flags |= CFLAG_CDBEXTERN;
3930 }
3931 if (tgtlen > PKT_PRIV_LEN) {
3932 if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
3933 goto fail;
3934 }
3935 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
3936 cmd->cmd_pkt->pkt_private = tgt;
3937 }
3938 if (statuslen > EXTCMDS_STATUS_SIZE) {
3939 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
3940 goto fail;
3941 }
3942 cmd->cmd_flags |= CFLAG_SCBEXTERN;
3943 cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
3944
3945 /* allocate sense data buf for DMA */
3946
3947 senselength = statuslen - MPTSAS_GET_ITEM_OFF(
3948 struct scsi_arq_status, sts_sensedata);
3949 cmd->cmd_rqslen = (uchar_t)senselength;
3950
3951 ap.a_hba_tran = mpt->m_tran;
3952 ap.a_target = 0;
3953 ap.a_lun = 0;
3954
3955 cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
3956 (struct buf *)NULL, senselength, B_READ,
3957 callback, NULL);
3958
3959 if (cmd->cmd_ext_arq_buf == NULL) {
3960 goto fail;
3961 }
3962 /*
3963 * allocate a extern arq handle and bind the buf
3964 */
3965 ext_arq_dma_attr = mpt->m_msg_dma_attr;
3966 ext_arq_dma_attr.dma_attr_sgllen = 1;
3967 if ((ddi_dma_alloc_handle(mpt->m_dip,
3968 &ext_arq_dma_attr, callback,
3969 NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
3970 goto fail;
3971 }
3972
3973 if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
3974 cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3975 callback, NULL, &cmd->cmd_ext_arqcookie,
3976 &cookiec)
3977 != DDI_SUCCESS) {
3978 goto fail;
3979 }
3980 cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
3981 }
3982 return (0);
3983 fail:
3984 mptsas_pkt_destroy_extern(mpt, cmd);
3985 return (1);
3986 }
3987
3988 /*
3989 * deallocate external pkt space and deallocate the pkt
3990 */
3991 static void
3992 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
3993 {
3994 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
3995
3996 if (cmd->cmd_flags & CFLAG_FREE) {
3997 mptsas_log(mpt, CE_PANIC,
3998 "mptsas_pkt_destroy_extern: freeing free packet");
3999 _NOTE(NOT_REACHED)
4000 /* NOTREACHED */
4001 }
4002 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4003 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4004 }
4005 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4006 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4007 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4008 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4009 }
4010 if (cmd->cmd_ext_arqhandle) {
4011 ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
4012 cmd->cmd_ext_arqhandle = NULL;
4013 }
4014 if (cmd->cmd_ext_arq_buf)
4015 scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
4016 }
4017 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4018 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4019 }
4020 cmd->cmd_flags = CFLAG_FREE;
4021 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4022 }
4023
4024 /*
4025 * tran_sync_pkt(9E) - explicit DMA synchronization
4026 */
4027 /*ARGSUSED*/
4028 static void
4029 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4030 {
4031 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4032
4033 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4034 ap->a_target, (void *)pkt));
4035
4036 if (cmd->cmd_dmahandle) {
4037 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4038 (cmd->cmd_flags & CFLAG_DMASEND) ?
4039 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4040 }
4041 }
4042
4043 /*
4044 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4045 */
4046 /*ARGSUSED*/
4047 static void
4048 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4049 {
4050 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4051 mptsas_t *mpt = ADDR2MPT(ap);
4052
4053 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4054 ap->a_target, (void *)pkt));
4055
4056 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4057 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4058 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4059 }
4060
4061 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4062 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4063 cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4064 }
4065
4066 mptsas_free_extra_sgl_frame(mpt, cmd);
4067 }
4068
4069 static void
4070 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4071 {
4072 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4073 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4074 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4075 DDI_DMA_SYNC_FORCPU);
4076 }
4077 (*pkt->pkt_comp)(pkt);
4078 }
4079
4080 static void
4081 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4082 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4083 {
4084 uint_t cookiec;
4085 mptti_t *dmap;
4086 uint32_t flags;
4087 pMpi2SGESimple64_t sge;
4088 pMpi2SGEChain64_t sgechain;
4089 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4090
4091 /*
4092 * Save the number of entries in the DMA
4093 * Scatter/Gather list
4094 */
4095 cookiec = cmd->cmd_cookiec;
4096
4097 NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));
4098
4099 /*
4100 * Set read/write bit in control.
4101 */
4102 if (cmd->cmd_flags & CFLAG_DMASEND) {
4103 *control |= MPI2_SCSIIO_CONTROL_WRITE;
4104 } else {
4105 *control |= MPI2_SCSIIO_CONTROL_READ;
4106 }
4107
4108 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4109
4110 /*
4111 * We have 2 cases here. First where we can fit all the
4112 * SG elements into the main frame, and the case
4113 * where we can't.
4114 * If we have more cookies than we can attach to a frame
4115 * we will need to use a chain element to point
4116 * a location of memory where the rest of the S/G
4117 * elements reside.
4118 */
4119 if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4120 dmap = cmd->cmd_sg;
4121 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4122 while (cookiec--) {
4123 ddi_put32(acc_hdl,
4124 &sge->Address.Low, dmap->addr.address64.Low);
4125 ddi_put32(acc_hdl,
4126 &sge->Address.High, dmap->addr.address64.High);
4127 ddi_put32(acc_hdl, &sge->FlagsLength,
4128 dmap->count);
4129 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4130 flags |= ((uint32_t)
4131 (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4132 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4133 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4134 MPI2_SGE_FLAGS_SHIFT);
4135
4136 /*
4137 * If this is the last cookie, we set the flags
4138 * to indicate so
4139 */
4140 if (cookiec == 0) {
4141 flags |=
4142 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4143 | MPI2_SGE_FLAGS_END_OF_BUFFER
4144 | MPI2_SGE_FLAGS_END_OF_LIST) <<
4145 MPI2_SGE_FLAGS_SHIFT);
4146 }
4147 if (cmd->cmd_flags & CFLAG_DMASEND) {
4148 flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4149 MPI2_SGE_FLAGS_SHIFT);
4150 } else {
4151 flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4152 MPI2_SGE_FLAGS_SHIFT);
4153 }
4154 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4155 dmap++;
4156 sge++;
4157 }
4158 } else {
4159 /*
4160 * Hereby we start to deal with multiple frames.
4161 * The process is as follows:
4162 * 1. Determine how many frames are needed for SGL element
4163 * storage; Note that all frames are stored in contiguous
4164 * memory space and in 64-bit DMA mode each element is
4165 * 3 double-words (12 bytes) long.
4166 * 2. Fill up the main frame. We need to do this separately
4167 * since it contains the SCSI IO request header and needs
4168 * dedicated processing. Note that the last 4 double-words
4169 * of the SCSI IO header is for SGL element storage
4170 * (MPI2_SGE_IO_UNION).
4171 * 3. Fill the chain element in the main frame, so the DMA
4172 * engine can use the following frames.
4173 * 4. Enter a loop to fill the remaining frames. Note that the
4174 * last frame contains no chain element. The remaining
4175 * frames go into the mpt SGL buffer allocated on the fly,
4176 * not immediately following the main message frame, as in
4177 * Gen1.
4178 * Some restrictions:
4179 * 1. For 64-bit DMA, the simple element and chain element
4180 * are both of 3 double-words (12 bytes) in size, even
4181 * though all frames are stored in the first 4G of mem
4182 * range and the higher 32-bits of the address are always 0.
4183 * 2. On some controllers (like the 1064/1068), a frame can
4184 * hold SGL elements with the last 1 or 2 double-words
4185 * (4 or 8 bytes) un-used. On these controllers, we should
4186 * recognize that there's not enough room for another SGL
4187 * element and move the sge pointer to the next frame.
4188 */
4189 int i, j, k, l, frames, sgemax;
4190 int temp;
4191 uint8_t chainflags;
4192 uint16_t chainlength;
4193 mptsas_cache_frames_t *p;
4194
4195 /*
4196 * Sgemax is the number of SGE's that will fit
4197 * each extra frame and frames is total
4198 * number of frames we'll need. 1 sge entry per
4199 * frame is reseverd for the chain element thus the -1 below.
4200 */
4201 sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
4202 - 1);
4203 temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4204
4205 /*
4206 * A little check to see if we need to round up the number
4207 * of frames we need
4208 */
4209 if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4210 sgemax) > 1) {
4211 frames = (temp + 1);
4212 } else {
4213 frames = temp;
4214 }
4215 dmap = cmd->cmd_sg;
4216 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4217
4218 /*
4219 * First fill in the main frame
4220 */
4221 for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
4222 ddi_put32(acc_hdl, &sge->Address.Low,
4223 dmap->addr.address64.Low);
4224 ddi_put32(acc_hdl, &sge->Address.High,
4225 dmap->addr.address64.High);
4226 ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
4227 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4228 flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4229 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4230 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4231 MPI2_SGE_FLAGS_SHIFT);
4232
4233 /*
4234 * If this is the last SGE of this frame
4235 * we set the end of list flag
4236 */
4237 if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
4238 flags |= ((uint32_t)
4239 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4240 MPI2_SGE_FLAGS_SHIFT);
4241 }
4242 if (cmd->cmd_flags & CFLAG_DMASEND) {
4243 flags |=
4244 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4245 MPI2_SGE_FLAGS_SHIFT);
4246 } else {
4247 flags |=
4248 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4249 MPI2_SGE_FLAGS_SHIFT);
4250 }
4251 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4252 dmap++;
4253 sge++;
4254 }
4255
4256 /*
4257 * Fill in the chain element in the main frame.
4258 * About calculation on ChainOffset:
4259 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4260 * in the end reserved for SGL element storage
4261 * (MPI2_SGE_IO_UNION); we should count it in our
4262 * calculation. See its definition in the header file.
4263 * 2. Constant j is the counter of the current SGL element
4264 * that will be processed, and (j - 1) is the number of
4265 * SGL elements that have been processed (stored in the
4266 * main frame).
4267 * 3. ChainOffset value should be in units of double-words (4
4268 * bytes) so the last value should be divided by 4.
4269 */
4270 ddi_put8(acc_hdl, &frame->ChainOffset,
4271 (sizeof (MPI2_SCSI_IO_REQUEST) -
4272 sizeof (MPI2_SGE_IO_UNION) +
4273 (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4274 sgechain = (pMpi2SGEChain64_t)sge;
4275 chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4276 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4277 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4278 ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4279
4280 /*
4281 * The size of the next frame is the accurate size of space
4282 * (in bytes) used to store the SGL elements. j is the counter
4283 * of SGL elements. (j - 1) is the number of SGL elements that
4284 * have been processed (stored in frames).
4285 */
4286 if (frames >= 2) {
4287 chainlength = mpt->m_req_frame_size /
4288 sizeof (MPI2_SGE_SIMPLE64) *
4289 sizeof (MPI2_SGE_SIMPLE64);
4290 } else {
4291 chainlength = ((cookiec - (j - 1)) *
4292 sizeof (MPI2_SGE_SIMPLE64));
4293 }
4294
4295 p = cmd->cmd_extra_frames;
4296
4297 ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4298 ddi_put32(acc_hdl, &sgechain->Address.Low,
4299 p->m_phys_addr);
4300 /* SGL is allocated in the first 4G mem range */
4301 ddi_put32(acc_hdl, &sgechain->Address.High, 0);
4302
4303 /*
4304 * If there are more than 2 frames left we have to
4305 * fill in the next chain offset to the location of
4306 * the chain element in the next frame.
4307 * sgemax is the number of simple elements in an extra
4308 * frame. Note that the value NextChainOffset should be
4309 * in double-words (4 bytes).
4310 */
4311 if (frames >= 2) {
4312 ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4313 (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4314 } else {
4315 ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4316 }
4317
4318 /*
4319 * Jump to next frame;
4320 * Starting here, chain buffers go into the per command SGL.
4321 * This buffer is allocated when chain buffers are needed.
4322 */
4323 sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4324 i = cookiec;
4325
4326 /*
4327 * Start filling in frames with SGE's. If we
4328 * reach the end of frame and still have SGE's
4329 * to fill we need to add a chain element and
4330 * use another frame. j will be our counter
4331 * for what cookie we are at and i will be
4332 * the total cookiec. k is the current frame
4333 */
4334 for (k = 1; k <= frames; k++) {
4335 for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4336
4337 /*
4338 * If we have reached the end of frame
4339 * and we have more SGE's to fill in
4340 * we have to fill the final entry
4341 * with a chain element and then
4342 * continue to the next frame
4343 */
4344 if ((l == (sgemax + 1)) && (k != frames)) {
4345 sgechain = (pMpi2SGEChain64_t)sge;
4346 j--;
4347 chainflags = (
4348 MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4349 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4350 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4351 ddi_put8(p->m_acc_hdl,
4352 &sgechain->Flags, chainflags);
4353 /*
4354 * k is the frame counter and (k + 1)
4355 * is the number of the next frame.
4356 * Note that frames are in contiguous
4357 * memory space.
4358 */
4359 ddi_put32(p->m_acc_hdl,
4360 &sgechain->Address.Low,
4361 (p->m_phys_addr +
4362 (mpt->m_req_frame_size * k)));
4363 ddi_put32(p->m_acc_hdl,
4364 &sgechain->Address.High, 0);
4365
4366 /*
4367 * If there are more than 2 frames left
4368 * we have to next chain offset to
4369 * the location of the chain element
4370 * in the next frame and fill in the
4371 * length of the next chain
4372 */
4373 if ((frames - k) >= 2) {
4374 ddi_put8(p->m_acc_hdl,
4375 &sgechain->NextChainOffset,
4376 (sgemax *
4377 sizeof (MPI2_SGE_SIMPLE64))
4378 >> 2);
4379 ddi_put16(p->m_acc_hdl,
4380 &sgechain->Length,
4381 mpt->m_req_frame_size /
4382 sizeof (MPI2_SGE_SIMPLE64) *
4383 sizeof (MPI2_SGE_SIMPLE64));
4384 } else {
4385 /*
4386 * This is the last frame. Set
4387 * the NextChainOffset to 0 and
4388 * Length is the total size of
4389 * all remaining simple elements
4390 */
4391 ddi_put8(p->m_acc_hdl,
4392 &sgechain->NextChainOffset,
4393 0);
4394 ddi_put16(p->m_acc_hdl,
4395 &sgechain->Length,
4396 (cookiec - j) *
4397 sizeof (MPI2_SGE_SIMPLE64));
4398 }
4399
4400 /* Jump to the next frame */
4401 sge = (pMpi2SGESimple64_t)
4402 ((char *)p->m_frames_addr +
4403 (int)mpt->m_req_frame_size * k);
4404
4405 continue;
4406 }
4407
4408 ddi_put32(p->m_acc_hdl,
4409 &sge->Address.Low,
4410 dmap->addr.address64.Low);
4411 ddi_put32(p->m_acc_hdl,
4412 &sge->Address.High,
4413 dmap->addr.address64.High);
4414 ddi_put32(p->m_acc_hdl,
4415 &sge->FlagsLength, dmap->count);
4416 flags = ddi_get32(p->m_acc_hdl,
4417 &sge->FlagsLength);
4418 flags |= ((uint32_t)(
4419 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4420 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4421 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4422 MPI2_SGE_FLAGS_SHIFT);
4423
4424 /*
4425 * If we are at the end of the frame and
4426 * there is another frame to fill in
4427 * we set the last simple element as last
4428 * element
4429 */
4430 if ((l == sgemax) && (k != frames)) {
4431 flags |= ((uint32_t)
4432 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4433 MPI2_SGE_FLAGS_SHIFT);
4434 }
4435
4436 /*
4437 * If this is the final cookie we
4438 * indicate it by setting the flags
4439 */
4440 if (j == i) {
4441 flags |= ((uint32_t)
4442 (MPI2_SGE_FLAGS_LAST_ELEMENT |
4443 MPI2_SGE_FLAGS_END_OF_BUFFER |
4444 MPI2_SGE_FLAGS_END_OF_LIST) <<
4445 MPI2_SGE_FLAGS_SHIFT);
4446 }
4447 if (cmd->cmd_flags & CFLAG_DMASEND) {
4448 flags |=
4449 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4450 MPI2_SGE_FLAGS_SHIFT);
4451 } else {
4452 flags |=
4453 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4454 MPI2_SGE_FLAGS_SHIFT);
4455 }
4456 ddi_put32(p->m_acc_hdl,
4457 &sge->FlagsLength, flags);
4458 dmap++;
4459 sge++;
4460 }
4461 }
4462
4463 /*
4464 * Sync DMA with the chain buffers that were just created
4465 */
4466 (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4467 }
4468 }
4469
4470 /*
4471 * Interrupt handling
4472 * Utility routine. Poll for status of a command sent to HBA
4473 * without interrupts (a FLAG_NOINTR command).
4474 */
int
mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
{
	int rval = TRUE;

	NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));

	/*
	 * In order to avoid using m_mutex in the ISR (a new separate mutex,
	 * m_intr_mutex, was introduced) and keep the same lock logic,
	 * the m_intr_mutex should be used to protect the getting and
	 * setting of the ReplyDescriptorIndex.
	 *
	 * Since the m_intr_mutex is released while processing the polled
	 * cmd, we set the poll flag early here to make sure the polled cmd
	 * is handled in this thread/context.  A side effect is that other
	 * cmds completing between the flag set and reset are also handled
	 * in this thread and not the ISR.  Since polled cmds are not
	 * common, the performance degradation in this case is not a big
	 * issue.
	 */
	mutex_enter(&mpt->m_intr_mutex);
	mpt->m_polled_intr = 1;
	mutex_exit(&mpt->m_intr_mutex);

	/*
	 * Kick the HBA so the polled request is actually issued; TM cmds
	 * are excluded (presumably already submitted by the TM path --
	 * confirm against the CFLAG_TM_CMD senders).
	 */
	if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
		mptsas_restart_hba(mpt);
	}

	/*
	 * Wait, using drv_usecwait(), long enough for the command to
	 * reasonably return from the target if the target isn't
	 * "dead". A polled command may well be sent from scsi_poll, and
	 * there are retries built in to scsi_poll if the transport
	 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
	 * and retries the transport up to scsi_poll_busycnt times
	 * (currently 60) if
	 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
	 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
	 *
	 * limit the waiting to avoid a hang in the event that the
	 * cmd never gets started but we are still receiving interrupts
	 */
	while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
		if (mptsas_wait_intr(mpt, polltime) == FALSE) {
			NDBG5(("mptsas_poll: command incomplete"));
			rval = FALSE;
			break;
		}
	}

	/* Drop back to normal ISR handling of completions. */
	mutex_enter(&mpt->m_intr_mutex);
	mpt->m_polled_intr = 0;
	mutex_exit(&mpt->m_intr_mutex);

	if (rval == FALSE) {

		/*
		 * This isn't supposed to happen; the hba must be wedged.
		 * Mark this cmd as a timeout.
		 */
		mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
		    (STAT_TIMEOUT|STAT_ABORTED));

		if (poll_cmd->cmd_queued == FALSE) {

			NDBG5(("mptsas_poll: not on waitq"));

			/* It was issued to the HBA; reflect that state. */
			poll_cmd->cmd_pkt->pkt_state |=
			    (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
		} else {

			/* find and remove it from the waitq */
			NDBG5(("mptsas_poll: delete from waitq"));
			mptsas_waitq_delete(mpt, poll_cmd);
		}

	}
	mptsas_fma_check(mpt, poll_cmd);
	NDBG5(("mptsas_poll: done"));
	return (rval);
}
4557
4558 /*
4559 * Used for polling cmds and TM function
4560 */
static int
mptsas_wait_intr(mptsas_t *mpt, int polltime)
{
	int			cnt;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	Mpi2ReplyDescriptorsUnion_t	reply_desc_union_v;
	uint32_t		int_mask;
	uint8_t			reply_type;

	NDBG5(("mptsas_wait_intr"));


	/*
	 * Get the current interrupt mask and disable interrupts. When
	 * re-enabling ints, set mask to saved value.
	 */
	int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Poll for up to 'polltime' iterations, waiting 1 ms between
	 * empty-queue checks (i.e. roughly polltime milliseconds total).
	 */
	for (cnt = 0; cnt < polltime; cnt++) {
		mutex_enter(&mpt->m_intr_mutex);
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		/*
		 * An all-ones descriptor is the "unused" pattern this
		 * driver writes below after consuming an entry, so it
		 * means nothing has been posted yet -- wait and retry.
		 */
		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			mutex_exit(&mpt->m_intr_mutex);
			drv_usecwait(1000);
			continue;
		}

		/*
		 * Copy the fields we need out of the DMA descriptor into
		 * a local (reply_desc_union_v) so the queue slot can be
		 * recycled before the reply is processed.
		 */
		reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Default.ReplyFlags);
		reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		reply_desc_union_v.Default.ReplyFlags = reply_type;
		if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
			reply_desc_union_v.SCSIIOSuccess.SMID =
			    ddi_get16(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->SCSIIOSuccess.SMID);
		} else if (reply_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply_desc_union_v.AddressReply.ReplyFrameAddress =
			    ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->AddressReply.ReplyFrameAddress);
			reply_desc_union_v.AddressReply.SMID =
			    ddi_get16(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->AddressReply.SMID);
		}
		/*
		 * Clear the reply descriptor for re-use and increment
		 * index.
		 */
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
		    0xFFFFFFFFFFFFFFFF);
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}

		/*
		 * Update the global reply index
		 */
		ddi_put32(mpt->m_datap,
		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		mutex_exit(&mpt->m_intr_mutex);

		/*
		 * The reply is valid, process it according to its
		 * type.
		 */
		mptsas_process_intr(mpt, &reply_desc_union_v);


		/*
		 * Re-enable interrupts and quit.
		 */
		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
		    int_mask);
		return (TRUE);

	}

	/*
	 * Timed out with no reply posted: restore the saved interrupt
	 * mask and report failure.
	 */
	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
	return (FALSE);
}
4660
4661 /*
4662 * For fastpath, the m_intr_mutex should be held from the begining to the end,
4663 * so we only treat those cmds that need not release m_intr_mutex(even just for
4664 * a moment) as candidate for fast processing. otherwise, we don't handle them
4665 * and just return, then in ISR, those cmds would be handled later with m_mutex
4666 * held and m_intr_mutex not held.
4667 */
/*
 * Returns TRUE when the cmd has been fully handled here (or was invalid),
 * FALSE when the caller must fall back to the slow path
 * (mptsas_process_intr() with m_mutex held).
 */
static int
mptsas_handle_io_fastpath(mptsas_t *mpt,
    uint16_t SMID)
{
	mptsas_slots_t				*slots = mpt->m_active;
	mptsas_cmd_t				*cmd = NULL;
	struct scsi_pkt			*pkt;

	/*
	 * This is a success reply so just complete the IO. First, do a sanity
	 * check on the SMID. The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_slots)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (TRUE);
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return (TRUE);
	}

	/* Successful completion: mark the pkt fully transported. */
	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/*
	 * If the cmd is a IOC, or a passthrough, then we don't process it in
	 * fastpath, and later it would be handled by mptsas_process_intr()
	 * with m_mutex protected.
	 */
	if (cmd->cmd_flags & (CFLAG_PASSTHRU | CFLAG_CMDIOC)) {
		return (FALSE);
	} else {
		mptsas_remove_cmd0(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add0(mpt, cmd);
	}

	/*
	 * In fastpath, the cmd should only be a context reply, so just check
	 * the post queue of the reply descriptor and the dmahandle of the cmd
	 * is enough. No sense data in this case and no need to check the dma
	 * handle where sense data dma info is saved, the dma handle of the
	 * reply frame, and the dma handle of the reply free queue.
	 * For the dma handle of the request queue. Check fma here since we
	 * are sure the request must have already been sent/DMAed correctly.
	 * otherwise checking in mptsas_scsi_start() is not correct since
	 * at that time the dma may not start.
	 */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Also check the data DMA handle of the cmd itself, if any. */
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* And the extra-frame (chained SGL) handles, if allocated. */
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}

	return (TRUE);
}
4771
4772 static void
4773 mptsas_handle_scsi_io_success(mptsas_t *mpt,
4774 pMpi2ReplyDescriptorsUnion_t reply_desc)
4775 {
4776 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
4777 uint16_t SMID;
4778 mptsas_slots_t *slots = mpt->m_active;
4779 mptsas_cmd_t *cmd = NULL;
4780 struct scsi_pkt *pkt;
4781
4782 scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
4783 SMID = scsi_io_success->SMID;
4784
4785 /*
4786 * This is a success reply so just complete the IO. First, do a sanity
4787 * check on the SMID. The final slot is used for TM requests, which
4788 * would not come into this reply handler.
4789 */
4790 if ((SMID == 0) || (SMID > slots->m_n_slots)) {
4791 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4792 SMID);
4793 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4794 return;
4795 }
4796
4797 cmd = slots->m_slot[SMID];
4798
4799 /*
4800 * print warning and return if the slot is empty
4801 */
4802 if (cmd == NULL) {
4803 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
4804 "in slot %d", SMID);
4805 return;
4806 }
4807
4808 pkt = CMD2PKT(cmd);
4809 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
4810 STATE_GOT_STATUS);
4811 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4812 pkt->pkt_state |= STATE_XFERRED_DATA;
4813 }
4814 pkt->pkt_resid = 0;
4815
4816 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
4817 cmd->cmd_flags |= CFLAG_FINISHED;
4818 cv_broadcast(&mpt->m_passthru_cv);
4819 return;
4820 } else {
4821 mptsas_remove_cmd(mpt, cmd);
4822 }
4823
4824 if (cmd->cmd_flags & CFLAG_RETRY) {
4825 /*
4826 * The target returned QFULL or busy, do not add tihs
4827 * pkt to the doneq since the hba will retry
4828 * this cmd.
4829 *
4830 * The pkt has already been resubmitted in
4831 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4832 * Remove this cmd_flag here.
4833 */
4834 cmd->cmd_flags &= ~CFLAG_RETRY;
4835 } else {
4836 mptsas_doneq_add(mpt, cmd);
4837 }
4838 }
4839
/*
 * Handle an address reply descriptor: locate the reply frame it points
 * at, dispatch on the frame's Function code, and finally return the
 * frame to the reply free queue (for the cases that fall through the
 * switch) and complete or retry the associated cmd.
 */
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2AddressReplyDescriptor_t	address_reply;
	pMPI2DefaultReply_t		reply;
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint32_t			reply_addr;
	uint16_t			SMID, iocstatus;
	mptsas_slots_t			*slots = mpt->m_active;
	mptsas_cmd_t			*cmd = NULL;
	uint8_t				function, buffer_type;
	m_replyh_arg_t			*args;
	int				reply_frame_no;

	ASSERT(mutex_owned(&mpt->m_mutex));

	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;

	reply_addr = address_reply->ReplyFrameAddress;
	SMID = address_reply->SMID;
	/*
	 * If reply frame is not in the proper range (within the reply
	 * frame pool and aligned to a frame boundary) we should ignore
	 * this message and exit the interrupt handler.
	 */
	if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
	    (reply_addr >= (mpt->m_reply_frame_dma_addr +
	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
	    ((reply_addr - mpt->m_reply_frame_dma_addr) %
	    mpt->m_reply_frame_size != 0)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
		    "address 0x%x\n", reply_addr);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	/* Map the DMA address back to the kernel virtual reply frame. */
	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
	    mpt->m_reply_frame_dma_addr));
	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

	/*
	 * don't get slot information and command for events since these values
	 * don't exist
	 */
	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
		/*
		 * This could be a TM reply, which use the last allocated SMID,
		 * so allow for that.
		 */
		if ((SMID == 0) || (SMID > (slots->m_n_slots + 1))) {
			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
			    "%d\n", SMID);
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			return;
		}

		cmd = slots->m_slot[SMID];

		/*
		 * print warning and return if the slot is empty
		 */
		if (cmd == NULL) {
			mptsas_log(mpt, CE_WARN, "?NULL command for address "
			    "reply in slot %d", SMID);
			return;
		}
		/*
		 * Passthrough, config and FW-diag requests have threads
		 * blocked on them; save the reply frame address for the
		 * waiter and wake all three cvs (the waiter re-checks its
		 * own cmd).  The waiter is responsible for freeing the
		 * reply frame in these cases.
		 */
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
			return;
		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
			mptsas_remove_cmd(mpt, cmd);
		}
		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
	}
	/*
	 * Depending on the function, we need to handle
	 * the reply frame (and cmd) differently.
	 */
	switch (function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		cmd->cmd_rfm = reply_addr;
		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
		    cmd);
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_signal(&mpt->m_fw_cv);
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		/*
		 * Events are processed asynchronously by a taskq; hand
		 * the reply frame over via the per-frame args slot.  The
		 * taskq thread frees the reply frame when done.
		 */
		reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
		    mpt->m_reply_frame_size;
		args = &mpt->m_replyh_args[reply_frame_no];
		args->mpt = (void *)mpt;
		args->rfm = reply_addr;

		/*
		 * Record the event if its type is enabled in
		 * this mpt instance by ioctl.
		 */
		mptsas_record_event(args);

		/*
		 * Handle time critical events
		 * NOT_RESPONDING/ADDED only now
		 */
		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
			/*
			 * Would not return main process,
			 * just let taskq resolve ack action
			 * and ack would be sent in taskq thread
			 */
			NDBG20(("send mptsas_handle_event_sync success"));
		}
		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "No memory available"
			    "for dispatch taskq");
			/*
			 * Return the reply frame to the free queue.
			 */
			ddi_put32(mpt->m_acc_free_queue_hdl,
			    &((uint32_t *)(void *)
			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
				mpt->m_free_index = 0;
			}

			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
		}
		return;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		/*
		 * If SMID is 0, this implies that the reply is due to a
		 * release function with a status that the buffer has been
		 * released. Set the buffer flags accordingly.
		 */
		if (SMID == 0) {
			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &reply->IOCStatus);
			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
				pBuffer =
				    &mpt->m_fw_diag_buffer_list[buffer_type];
				pBuffer->valid_data = TRUE;
				pBuffer->owned_by_firmware = FALSE;
				pBuffer->immediate = FALSE;
			}
		} else {
			/*
			 * Normal handling of diag post reply with SMID.
			 * The waiting FW-diag thread frees the reply frame.
			 */
			cmd = slots->m_slot[SMID];

			/*
			 * print warning and return if the slot is empty
			 */
			if (cmd == NULL) {
				mptsas_log(mpt, CE_WARN, "?NULL command for "
				    "address reply in slot %d", SMID);
				return;
			}
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_fw_diag_cv);
		}
		return;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
		break;
	}

	/*
	 * Return the reply frame to the free queue.
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
	    reply_addr);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);

	/* Internal FW cmds are completed by their issuing thread. */
	if (cmd->cmd_flags & CFLAG_FW_CMD)
		return;

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5060
5061 static void
5062 mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
5063 mptsas_cmd_t *cmd)
5064 {
5065 uint8_t scsi_status, scsi_state;
5066 uint16_t ioc_status;
5067 uint32_t xferred, sensecount, responsedata, loginfo = 0;
5068 struct scsi_pkt *pkt;
5069 struct scsi_arq_status *arqstat;
5070 struct buf *bp;
5071 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5072 uint8_t *sensedata = NULL;
5073
5074 if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
5075 (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
5076 bp = cmd->cmd_ext_arq_buf;
5077 } else {
5078 bp = cmd->cmd_arq_buf;
5079 }
5080
5081 scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
5082 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5083 scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
5084 xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
5085 sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
5086 responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
5087 &reply->ResponseInfo);
5088
5089 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
5090 loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
5091 &reply->IOCLogInfo);
5092 mptsas_log(mpt, CE_NOTE,
5093 "?Log info 0x%x received for target %d.\n"
5094 "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5095 loginfo, Tgt(cmd), scsi_status, ioc_status,
5096 scsi_state);
5097 }
5098
5099 NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5100 scsi_status, ioc_status, scsi_state));
5101
5102 pkt = CMD2PKT(cmd);
5103 *(pkt->pkt_scbp) = scsi_status;
5104
5105 if (loginfo == 0x31170000) {
5106 /*
5107 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5108 * 0x31170000 comes, that means the device missing delay
5109 * is in progressing, the command need retry later.
5110 */
5111 *(pkt->pkt_scbp) = STATUS_BUSY;
5112 return;
5113 }
5114
5115 if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
5116 ((ioc_status & MPI2_IOCSTATUS_MASK) ==
5117 MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
5118 pkt->pkt_reason = CMD_INCOMPLETE;
5119 pkt->pkt_state |= STATE_GOT_BUS;
5120 mutex_enter(&ptgt->m_tgt_intr_mutex);
5121 if (ptgt->m_reset_delay == 0) {
5122 mptsas_set_throttle(mpt, ptgt,
5123 DRAIN_THROTTLE);
5124 }
5125 mutex_exit(&ptgt->m_tgt_intr_mutex);
5126 return;
5127 }
5128
5129 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5130 responsedata &= 0x000000FF;
5131 if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5132 mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
5133 pkt->pkt_reason = CMD_TLR_OFF;
5134 return;
5135 }
5136 }
5137
5138
5139 switch (scsi_status) {
5140 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5141 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5142 arqstat = (void*)(pkt->pkt_scbp);
5143 arqstat->sts_rqpkt_status = *((struct scsi_status *)
5144 (pkt->pkt_scbp));
5145 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
5146 STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
5147 if (cmd->cmd_flags & CFLAG_XARQ) {
5148 pkt->pkt_state |= STATE_XARQ_DONE;
5149 }
5150 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5151 pkt->pkt_state |= STATE_XFERRED_DATA;
5152 }
5153 arqstat->sts_rqpkt_reason = pkt->pkt_reason;
5154 arqstat->sts_rqpkt_state = pkt->pkt_state;
5155 arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
5156 arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
5157 sensedata = (uint8_t *)&arqstat->sts_sensedata;
5158
5159 bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
5160 ((cmd->cmd_rqslen >= sensecount) ? sensecount :
5161 cmd->cmd_rqslen));
5162 arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
5163 cmd->cmd_flags |= CFLAG_CMDARQ;
5164 /*
5165 * Set proper status for pkt if autosense was valid
5166 */
5167 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5168 struct scsi_status zero_status = { 0 };
5169 arqstat->sts_rqpkt_status = zero_status;
5170 }
5171
5172 /*
5173 * ASC=0x47 is parity error
5174 * ASC=0x48 is initiator detected error received
5175 */
5176 if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
5177 ((scsi_sense_asc(sensedata) == 0x47) ||
5178 (scsi_sense_asc(sensedata) == 0x48))) {
5179 mptsas_log(mpt, CE_NOTE, "Aborted_command!");
5180 }
5181
5182 /*
5183 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5184 * ASC/ASCQ=0x25/0x00 means invalid lun
5185 */
5186 if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
5187 (scsi_sense_asc(sensedata) == 0x3F) &&
5188 (scsi_sense_ascq(sensedata) == 0x0E)) ||
5189 ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
5190 (scsi_sense_asc(sensedata) == 0x25) &&
5191 (scsi_sense_ascq(sensedata) == 0x00))) {
5192 mptsas_topo_change_list_t *topo_node = NULL;
5193
5194 topo_node = kmem_zalloc(
5195 sizeof (mptsas_topo_change_list_t),
5196 KM_NOSLEEP);
5197 if (topo_node == NULL) {
5198 mptsas_log(mpt, CE_NOTE, "No memory"
5199 "resource for handle SAS dynamic"
5200 "reconfigure.\n");
5201 break;
5202 }
5203 topo_node->mpt = mpt;
5204 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
5205 topo_node->un.phymask = ptgt->m_phymask;
5206 topo_node->devhdl = ptgt->m_devhdl;
5207 topo_node->object = (void *)ptgt;
5208 topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5209
5210 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5211 mptsas_handle_dr,
5212 (void *)topo_node,
5213 DDI_NOSLEEP)) != DDI_SUCCESS) {
5214 mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
5215 "for handle SAS dynamic reconfigure"
5216 "failed. \n");
5217 }
5218 }
5219 break;
5220 case MPI2_SCSI_STATUS_GOOD:
5221 switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5222 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5223 pkt->pkt_reason = CMD_DEV_GONE;
5224 pkt->pkt_state |= STATE_GOT_BUS;
5225 mutex_enter(&ptgt->m_tgt_intr_mutex);
5226 if (ptgt->m_reset_delay == 0) {
5227 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5228 }
5229 mutex_exit(&ptgt->m_tgt_intr_mutex);
5230 NDBG31(("lost disk for target%d, command:%x",
5231 Tgt(cmd), pkt->pkt_cdbp[0]));
5232 break;
5233 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5234 NDBG31(("data overrun: xferred=%d", xferred));
5235 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5236 pkt->pkt_reason = CMD_DATA_OVR;
5237 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5238 | STATE_SENT_CMD | STATE_GOT_STATUS
5239 | STATE_XFERRED_DATA);
5240 pkt->pkt_resid = 0;
5241 break;
5242 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5243 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5244 NDBG31(("data underrun: xferred=%d", xferred));
5245 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5246 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5247 | STATE_SENT_CMD | STATE_GOT_STATUS);
5248 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5249 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5250 pkt->pkt_state |= STATE_XFERRED_DATA;
5251 }
5252 break;
5253 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5254 mptsas_set_pkt_reason(mpt,
5255 cmd, CMD_RESET, STAT_BUS_RESET);
5256 break;
5257 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5258 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5259 mptsas_set_pkt_reason(mpt,
5260 cmd, CMD_RESET, STAT_DEV_RESET);
5261 break;
5262 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5263 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5264 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5265 mptsas_set_pkt_reason(mpt,
5266 cmd, CMD_TERMINATED, STAT_TERMINATED);
5267 break;
5268 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5269 case MPI2_IOCSTATUS_BUSY:
5270 /*
5271 * set throttles to drain
5272 */
5273 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5274 &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
5275 while (ptgt != NULL) {
5276 mutex_enter(&ptgt->m_tgt_intr_mutex);
5277 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5278 mutex_exit(&ptgt->m_tgt_intr_mutex);
5279
5280 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5281 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
5282 }
5283
5284 /*
5285 * retry command
5286 */
5287 cmd->cmd_flags |= CFLAG_RETRY;
5288 cmd->cmd_pkt_flags |= FLAG_HEAD;
5289
5290 mutex_exit(&mpt->m_mutex);
5291 (void) mptsas_accept_pkt(mpt, cmd);
5292 mutex_enter(&mpt->m_mutex);
5293 break;
5294 default:
5295 mptsas_log(mpt, CE_WARN,
5296 "unknown ioc_status = %x\n", ioc_status);
5297 mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5298 "count = %x, scsi_status = %x", scsi_state,
5299 xferred, scsi_status);
5300 break;
5301 }
5302 break;
5303 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5304 mptsas_handle_qfull(mpt, cmd);
5305 break;
5306 case MPI2_SCSI_STATUS_BUSY:
5307 NDBG31(("scsi_status busy received"));
5308 break;
5309 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5310 NDBG31(("scsi_status reservation conflict received"));
5311 break;
5312 default:
5313 mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
5314 scsi_status, ioc_status);
5315 mptsas_log(mpt, CE_WARN,
5316 "mptsas_process_intr: invalid scsi status\n");
5317 break;
5318 }
5319 }
5320
5321 static void
5322 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5323 mptsas_cmd_t *cmd)
5324 {
5325 uint8_t task_type;
5326 uint16_t ioc_status;
5327 uint32_t log_info;
5328 uint16_t dev_handle;
5329 struct scsi_pkt *pkt = CMD2PKT(cmd);
5330
5331 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5332 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5333 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5334 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5335
5336 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5337 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5338 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5339 task_type, ioc_status, log_info, dev_handle);
5340 pkt->pkt_reason = CMD_INCOMPLETE;
5341 return;
5342 }
5343
5344 switch (task_type) {
5345 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5346 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5347 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5348 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5349 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5350 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5351 break;
5352 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5353 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5354 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5355 /*
5356 * Check for invalid DevHandle of 0 in case application
5357 * sends bad command. DevHandle of 0 could cause problems.
5358 */
5359 if (dev_handle == 0) {
5360 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5361 " DevHandle of 0.");
5362 } else {
5363 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5364 task_type);
5365 }
5366 break;
5367 default:
5368 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5369 task_type);
5370 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5371 break;
5372 }
5373 }
5374
/*
 * Body of a doneq helper thread.  Each helper owns one entry of
 * m_doneq_thread_id[]; it sleeps on that entry's cv until commands are
 * queued on its doneq, then completes them one at a time.  The per-item
 * mutex is dropped around the completion callback because pkt completion
 * may call back into the driver.  On exit (ACTIVE flag cleared by the
 * teardown path), the thread decrements the global helper count and wakes
 * whoever is waiting in detach for the helpers to drain.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t			*mpt = arg->mpt;
	uint64_t			t = arg->t;
	mptsas_cmd_t			*cmd;
	struct scsi_pkt			*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		/* Sleep until work arrives (or until we're told to exit). */
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Drop the list mutex while running the target driver's
		 * completion routine; it may re-enter the HBA driver.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Tell the teardown path that this helper has exited. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5406
5407 /*
5408 * mpt interrupt handler.
5409 */
5410 static uint_t
5411 mptsas_intr(caddr_t arg1, caddr_t arg2)
5412 {
5413 mptsas_t *mpt = (void *)arg1;
5414 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5415 uchar_t did_reply = FALSE;
5416 int i = 0, j;
5417 uint8_t reply_type;
5418 uint16_t SMID;
5419
5420 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5421
5422 /*
5423 * 1.
5424 * To avoid using m_mutex in the ISR(ISR referes not only mptsas_intr,
5425 * but all of the recursive called functions in it. the same below),
5426 * separate mutexs are introduced to protect the elements shown in ISR.
5427 * 3 type of mutex are involved here:
5428 * a)per instance mutex m_intr_mutex.
5429 * b)per target mutex m_tgt_intr_mutex.
5430 * c)mutex that protect the free slot.
5431 *
5432 * a)per instance mutex m_intr_mutex:
5433 * used to protect m_options, m_power, m_waitq, etc that would be
5434 * checked/modified in ISR; protect the getting and setting the reply
5435 * descriptor index; protect the m_slots[];
5436 *
5437 * b)per target mutex m_tgt_intr_mutex:
5438 * used to protect per target element which has relationship to ISR.
5439 * contention for the new per target mutex is just as high as it in
5440 * sd(7d) driver.
5441 *
5442 * c)mutexs that protect the free slots:
5443 * those mutexs are introduced to minimize the mutex contentions
5444 * between the IO request threads where free slots are allocated
5445 * for sending cmds and ISR where slots holding outstanding cmds
5446 * are returned to the free pool.
5447 * the idea is like this:
5448 * 1) Partition all of the free slot into NCPU groups. For example,
5449 * In system where we have 15 slots, and 4 CPU, then slot s1,s5,s9,s13
5450 * are marked belonging to CPU1, s2,s6,s10,s14 to CPU2, s3,s7,s11,s15
5451 * to CPU3, and s4,s8,s12 to CPU4.
5452 * 2) In each of the group, an alloc/release queue pair is created,
5453 * and both the allocq and the releaseq have a dedicated mutex.
5454 * 3) When init, all of the slots in a CPU group are inserted into the
5455 * allocq of its CPU's pair.
5456 * 4) When doing IO,
5457 * mptsas_scsi_start()
5458 * {
5459 * cpuid = the cpu NO of the cpu where this thread is running on
5460 * retry:
5461 * mutex_enter(&allocq[cpuid]);
5462 * if (get free slot = success) {
5463 * remove the slot from the allocq
5464 * mutex_exit(&allocq[cpuid]);
5465 * return(success);
5466 * } else { // exchange allocq and releaseq and try again
5467 * mutex_enter(&releq[cpuid]);
5468 * exchange the allocq and releaseq of this pair;
5469 * mutex_exit(&releq[cpuid]);
5470 * if (try to get free slot again = success) {
5471 * remove the slot from the allocq
5472 * mutex_exit(&allocq[cpuid]);
5473 * return(success);
5474 * } else {
5475 * MOD(cpuid)++;
5476 * goto retry;
5477 * if (all CPU groups tried)
5478 * mutex_exit(&allocq[cpuid]);
5479 * return(failure);
5480 * }
5481 * }
5482 * }
5483 * ISR()
5484 * {
5485 * cpuid = the CPU group id where the slot sending the
5486 * cmd belongs;
5487 * mutex_enter(&releq[cpuid]);
5488 * remove the slot from the releaseq
5489 * mutex_exit(&releq[cpuid]);
5490 * }
5491 * This way, only when the queue pair doing exchange have mutex
5492 * contentions.
5493 *
5494 * For mutex m_intr_mutex and m_tgt_intr_mutex, there are 2 scenarios:
5495 *
5496 * a)If the elements are only checked but not modified in the ISR, then
5497 * only the places where those elements are modifed(outside of ISR)
5498 * need to be protected by the new introduced mutex.
5499 * For example, data A is only read/checked in ISR, then we need do
5500 * like this:
5501 * In ISR:
5502 * {
5503 * mutex_enter(&new_mutex);
5504 * read(A);
5505 * mutex_exit(&new_mutex);
5506 * //the new_mutex here is either the m_tgt_intr_mutex or
5507 * //the m_intr_mutex.
5508 * }
5509 * In non-ISR
5510 * {
5511 * mutex_enter(&m_mutex); //the stock driver already did this
5512 * mutex_enter(&new_mutex);
5513 * write(A);
5514 * mutex_exit(&new_mutex);
5515 * mutex_exit(&m_mutex); //the stock driver already did this
5516 *
5517 * read(A);
5518 * // read(A) in non-ISR is not required to be protected by new
5519 * // mutex since 'A' has already been protected by m_mutex
5520 * // outside of the ISR
5521 * }
5522 *
5523 * Those fields in mptsas_target_t/ptgt which are only read in ISR
5524 * fall into this catergory. So they, together with the fields which
5525 * are never read in ISR, are not necessary to be protected by
5526 * m_tgt_intr_mutex, don't bother.
5527 * checking of m_waitq also falls into this catergory. so all of the
5528 * place outside of ISR where the m_waitq is modified, such as in
5529 * mptsas_waitq_add(), mptsas_waitq_delete(), mptsas_waitq_rm(),
5530 * m_intr_mutex should be used.
5531 *
5532 * b)If the elements are modified in the ISR, then each place where
5533 * those elements are referred(outside of ISR) need to be protected
5534 * by the new introduced mutex. Of course, if those elements only
5535 * appear in the non-key code path, that is, they don't affect
5536 * performance, then the m_mutex can still be used as before.
5537 * For example, data B is modified in key code path in ISR, and data C
5538 * is modified in non-key code path in ISR, then we can do like this:
5539 * In ISR:
5540 * {
5541 * mutex_enter(&new_mutex);
5542 * wirte(B);
5543 * mutex_exit(&new_mutex);
5544 * if (seldom happen) {
5545 * mutex_enter(&m_mutex);
5546 * write(C);
5547 * mutex_exit(&m_mutex);
5548 * }
5549 * //the new_mutex here is either the m_tgt_intr_mutex or
5550 * //the m_intr_mutex.
5551 * }
5552 * In non-ISR
5553 * {
5554 * mutex_enter(&new_mutex);
5555 * write(B);
5556 * mutex_exit(&new_mutex);
5557 *
5558 * mutex_enter(&new_mutex);
5559 * read(B);
5560 * mutex_exit(&new_mutex);
5561 * // both write(B) and read(B) in non-ISR is required to be
5562 * // protected by new mutex outside of the ISR
5563 *
5564 * mutex_enter(&m_mutex); //the stock driver already did this
5565 * read(C);
5566 * write(C);
5567 * mutex_exit(&m_mutex); //the stock driver already did this
5568 * // both write(C) and read(C) in non-ISR have been already
5569 * // been protected by m_mutex outside of the ISR
5570 * }
5571 *
5572 * For example, ptgt->m_t_ncmds fall into 'B' of this catergory, and
5573 * elements shown in address reply, restart_hba, passthrough, IOC
5574 * fall into 'C' of this catergory.
5575 *
5576 * In any case where mutexs are nested, make sure in the following
5577 * order:
5578 * m_mutex -> m_intr_mutex -> m_tgt_intr_mutex
5579 * m_intr_mutex -> m_tgt_intr_mutex
5580 * m_mutex -> m_intr_mutex
5581 * m_mutex -> m_tgt_intr_mutex
5582 *
5583 * 2.
5584 * Make sure at any time, getting the ReplyDescriptor by m_post_index
5585 * and setting m_post_index to the ReplyDescriptorIndex register are
5586 * atomic. Since m_mutex is not used for this purpose in ISR, the new
5587 * mutex m_intr_mutex must play this role. So mptsas_poll(), where this
5588 * kind of getting/setting is also performed, must use m_intr_mutex.
5589 * Note, since context reply in ISR/process_intr is the only code path
5590 * which affect performance, a fast path is introduced to only handle
5591 * the read/write IO having context reply. For other IOs such as
5592 * passthrough and IOC with context reply and all address reply, we
5593 * use the as-is process_intr() to handle them. In order to keep the
5594 * same semantics in process_intr(), make sure any new mutex is not held
5595 * before enterring it.
5596 */
5597
5598 mutex_enter(&mpt->m_intr_mutex);
5599
5600 /*
5601 * If interrupts are shared by two channels then check whether this
5602 * interrupt is genuinely for this channel by making sure first the
5603 * chip is in high power state.
5604 */
5605 if ((mpt->m_options & MPTSAS_OPT_PM) &&
5606 (mpt->m_power_level != PM_LEVEL_D0)) {
5607 mutex_exit(&mpt->m_intr_mutex);
5608 return (DDI_INTR_UNCLAIMED);
5609 }
5610
5611 /*
5612 * If polling, interrupt was triggered by some shared interrupt because
5613 * IOC interrupts are disabled during polling, so polling routine will
5614 * handle any replies. Considering this, if polling is happening,
5615 * return with interrupt unclaimed.
5616 */
5617 if (mpt->m_polled_intr) {
5618 mutex_exit(&mpt->m_intr_mutex);
5619 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5620 return (DDI_INTR_UNCLAIMED);
5621 }
5622
5623 /*
5624 * Read the istat register.
5625 */
5626 if ((INTPENDING(mpt)) != 0) {
5627 /*
5628 * read fifo until empty.
5629 */
5630 #ifndef __lock_lint
5631 _NOTE(CONSTCOND)
5632 #endif
5633 while (TRUE) {
5634 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5635 DDI_DMA_SYNC_FORCPU);
5636 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5637 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5638
5639 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5640 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5641 ddi_get32(mpt->m_acc_post_queue_hdl,
5642 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5643 break;
5644 }
5645
5646 /*
5647 * The reply is valid, process it according to its
5648 * type. Also, set a flag for updating the reply index
5649 * after they've all been processed.
5650 */
5651 did_reply = TRUE;
5652
5653 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5654 &reply_desc_union->Default.ReplyFlags);
5655 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5656 mpt->m_reply[i].Default.ReplyFlags = reply_type;
5657 if (reply_type ==
5658 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5659 SMID = ddi_get16(mpt->m_acc_post_queue_hdl,
5660 &reply_desc_union->SCSIIOSuccess.SMID);
5661 if (mptsas_handle_io_fastpath(mpt, SMID) !=
5662 TRUE) {
5663 mpt->m_reply[i].SCSIIOSuccess.SMID =
5664 SMID;
5665 i++;
5666 }
5667 } else if (reply_type ==
5668 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5669 mpt->m_reply[i].AddressReply.ReplyFrameAddress =
5670 ddi_get32(mpt->m_acc_post_queue_hdl,
5671 &reply_desc_union->AddressReply.
5672 ReplyFrameAddress);
5673 mpt->m_reply[i].AddressReply.SMID =
5674 ddi_get16(mpt->m_acc_post_queue_hdl,
5675 &reply_desc_union->AddressReply.SMID);
5676 i++;
5677 }
5678 /*
5679 * Clear the reply descriptor for re-use and increment
5680 * index.
5681 */
5682 ddi_put64(mpt->m_acc_post_queue_hdl,
5683 &((uint64_t *)(void *)mpt->m_post_queue)
5684 [mpt->m_post_index], 0xFFFFFFFFFFFFFFFF);
5685 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5686 DDI_DMA_SYNC_FORDEV);
5687
5688 /*
5689 * Increment post index and roll over if needed.
5690 */
5691 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5692 mpt->m_post_index = 0;
5693 }
5694 if (i >= MPI_ADDRESS_COALSCE_MAX)
5695 break;
5696 }
5697
5698 /*
5699 * Update the global reply index if at least one reply was
5700 * processed.
5701 */
5702 if (did_reply) {
5703 ddi_put32(mpt->m_datap,
5704 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5705
5706 /*
5707 * For fma, only check the PIO is required and enough
5708 * here. Those cases where fastpath is not hit, the
5709 * mptsas_fma_check() check all of the types of
5710 * fma. That is not necessary and sometimes not
5711 * correct. fma check should only be done after
5712 * the PIO and/or dma is performed.
5713 */
5714 if ((mptsas_check_acc_handle(mpt->m_datap) !=
5715 DDI_SUCCESS)) {
5716 ddi_fm_service_impact(mpt->m_dip,
5717 DDI_SERVICE_UNAFFECTED);
5718 }
5719
5720 }
5721 } else {
5722 mutex_exit(&mpt->m_intr_mutex);
5723 return (DDI_INTR_UNCLAIMED);
5724 }
5725 NDBG1(("mptsas_intr complete"));
5726 mutex_exit(&mpt->m_intr_mutex);
5727
5728 /*
5729 * Since most of the cmds(read and write IO with success return.)
5730 * have already been processed in fast path in which the m_mutex
5731 * is not held, handling here the address reply and other context reply
5732 * such as passthrough and IOC cmd with m_mutex held should be a big
5733 * issue for performance.
5734 * If holding m_mutex to process these cmds was still an obvious issue,
5735 * we can process them in a taskq.
5736 */
5737 for (j = 0; j < i; j++) {
5738 mutex_enter(&mpt->m_mutex);
5739 mptsas_process_intr(mpt, &mpt->m_reply[j]);
5740 mutex_exit(&mpt->m_mutex);
5741 }
5742
5743 /*
5744 * If no helper threads are created, process the doneq in ISR. If
5745 * helpers are created, use the doneq length as a metric to measure the
5746 * load on the interrupt CPU. If it is long enough, which indicates the
5747 * load is heavy, then we deliver the IO completions to the helpers.
5748 * This measurement has some limitations, although it is simple and
5749 * straightforward and works well for most of the cases at present.
5750 */
5751 if (!mpt->m_doneq_thread_n) {
5752 mptsas_doneq_empty(mpt);
5753 } else {
5754 int helper = 1;
5755 mutex_enter(&mpt->m_intr_mutex);
5756 if (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)
5757 helper = 0;
5758 mutex_exit(&mpt->m_intr_mutex);
5759 if (helper) {
5760 mptsas_deliver_doneq_thread(mpt);
5761 } else {
5762 mptsas_doneq_empty(mpt);
5763 }
5764 }
5765
5766 /*
5767 * If there are queued cmd, start them now.
5768 */
5769 mutex_enter(&mpt->m_intr_mutex);
5770 if (mpt->m_waitq != NULL) {
5771 mutex_exit(&mpt->m_intr_mutex);
5772 mutex_enter(&mpt->m_mutex);
5773 mptsas_restart_hba(mpt);
5774 mutex_exit(&mpt->m_mutex);
5775 return (DDI_INTR_CLAIMED);
5776 }
5777 mutex_exit(&mpt->m_intr_mutex);
5778 return (DDI_INTR_CLAIMED);
5779 }
5780
5781 /*
5782 * In ISR, the successfully completed read and write IO are processed in a
5783 * fast path. This function is only used to handle non-fastpath IO, including
5784 * all of the address reply, and the context reply for IOC cmd, passthrough,
5785 * etc.
5786 * This function is also used to process polled cmd.
5787 */
5788 static void
5789 mptsas_process_intr(mptsas_t *mpt,
5790 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5791 {
5792 uint8_t reply_type;
5793
5794 /*
5795 * The reply is valid, process it according to its
5796 * type. Also, set a flag for updated the reply index
5797 * after they've all been processed.
5798 */
5799 reply_type = reply_desc_union->Default.ReplyFlags;
5800 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5801 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5802 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5803 mptsas_handle_address_reply(mpt, reply_desc_union);
5804 } else {
5805 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5806 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5807 }
5808 }
5809
5810 /*
5811 * handle qfull condition
5812 */
5813 static void
5814 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5815 {
5816 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5817
5818 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5819 (ptgt->m_qfull_retries == 0)) {
5820 /*
5821 * We have exhausted the retries on QFULL, or,
5822 * the target driver has indicated that it
5823 * wants to handle QFULL itself by setting
5824 * qfull-retries capability to 0. In either case
5825 * we want the target driver's QFULL handling
5826 * to kick in. We do this by having pkt_reason
5827 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5828 */
5829 mutex_enter(&ptgt->m_tgt_intr_mutex);
5830 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5831 mutex_exit(&ptgt->m_tgt_intr_mutex);
5832 } else {
5833 mutex_enter(&ptgt->m_tgt_intr_mutex);
5834 if (ptgt->m_reset_delay == 0) {
5835 ptgt->m_t_throttle =
5836 max((ptgt->m_t_ncmds - 2), 0);
5837 }
5838 mutex_exit(&ptgt->m_tgt_intr_mutex);
5839
5840 cmd->cmd_pkt_flags |= FLAG_HEAD;
5841 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5842 cmd->cmd_flags |= CFLAG_RETRY;
5843
5844 mutex_exit(&mpt->m_mutex);
5845 (void) mptsas_accept_pkt(mpt, cmd);
5846 mutex_enter(&mpt->m_mutex);
5847
5848 /*
5849 * when target gives queue full status with no commands
5850 * outstanding (m_t_ncmds == 0), throttle is set to 0
5851 * (HOLD_THROTTLE), and the queue full handling start
5852 * (see psarc/1994/313); if there are commands outstanding,
5853 * throttle is set to (m_t_ncmds - 2)
5854 */
5855 mutex_enter(&ptgt->m_tgt_intr_mutex);
5856 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5857 /*
5858 * By setting throttle to QFULL_THROTTLE, we
5859 * avoid submitting new commands and in
5860 * mptsas_restart_cmd find out slots which need
5861 * their throttles to be cleared.
5862 */
5863 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5864 if (mpt->m_restart_cmd_timeid == 0) {
5865 mpt->m_restart_cmd_timeid =
5866 timeout(mptsas_restart_cmd, mpt,
5867 ptgt->m_qfull_retry_interval);
5868 }
5869 }
5870 mutex_exit(&ptgt->m_tgt_intr_mutex);
5871 }
5872 }
5873
5874 mptsas_phymask_t
5875 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5876 {
5877 mptsas_phymask_t phy_mask = 0;
5878 uint8_t i = 0;
5879
5880 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5881
5882 ASSERT(mutex_owned(&mpt->m_mutex));
5883
5884 /*
5885 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5886 */
5887 if (physport == 0xFF) {
5888 return (0);
5889 }
5890
5891 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5892 if (mpt->m_phy_info[i].attached_devhdl &&
5893 (mpt->m_phy_info[i].phy_mask != 0) &&
5894 (mpt->m_phy_info[i].port_num == physport)) {
5895 phy_mask = mpt->m_phy_info[i].phy_mask;
5896 break;
5897 }
5898 }
5899 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5900 mpt->m_instance, physport, phy_mask));
5901 return (phy_mask);
5902 }
5903
5904 /*
5905 * mpt free device handle after device gone, by use of passthrough
5906 */
5907 static int
5908 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5909 {
5910 Mpi2SasIoUnitControlRequest_t req;
5911 Mpi2SasIoUnitControlReply_t rep;
5912 int ret;
5913
5914 ASSERT(mutex_owned(&mpt->m_mutex));
5915
5916 /*
5917 * Need to compose a SAS IO Unit Control request message
5918 * and call mptsas_do_passthru() function
5919 */
5920 bzero(&req, sizeof (req));
5921 bzero(&rep, sizeof (rep));
5922
5923 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5924 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5925 req.DevHandle = LE_16(devhdl);
5926
5927 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5928 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5929 if (ret != 0) {
5930 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5931 "Control error %d", ret);
5932 return (DDI_FAILURE);
5933 }
5934
5935 /* do passthrough success, check the ioc status */
5936 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5937 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5938 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5939 return (DDI_FAILURE);
5940 }
5941
5942 return (DDI_SUCCESS);
5943 }
5944
5945 static void
5946 mptsas_update_phymask(mptsas_t *mpt)
5947 {
5948 mptsas_phymask_t mask = 0, phy_mask;
5949 char *phy_mask_name;
5950 uint8_t current_port;
5951 int i, j;
5952
5953 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5954
5955 ASSERT(mutex_owned(&mpt->m_mutex));
5956
5957 (void) mptsas_get_sas_io_unit_page(mpt);
5958
5959 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5960
5961 for (i = 0; i < mpt->m_num_phys; i++) {
5962 phy_mask = 0x00;
5963
5964 if (mpt->m_phy_info[i].attached_devhdl == 0)
5965 continue;
5966
5967 bzero(phy_mask_name, sizeof (phy_mask_name));
5968
5969 current_port = mpt->m_phy_info[i].port_num;
5970
5971 if ((mask & (1 << i)) != 0)
5972 continue;
5973
5974 for (j = 0; j < mpt->m_num_phys; j++) {
5975 if (mpt->m_phy_info[j].attached_devhdl &&
5976 (mpt->m_phy_info[j].port_num == current_port)) {
5977 phy_mask |= (1 << j);
5978 }
5979 }
5980 mask = mask | phy_mask;
5981
5982 for (j = 0; j < mpt->m_num_phys; j++) {
5983 if ((phy_mask >> j) & 0x01) {
5984 mpt->m_phy_info[j].phy_mask = phy_mask;
5985 }
5986 }
5987
5988 (void) sprintf(phy_mask_name, "%x", phy_mask);
5989
5990 mutex_exit(&mpt->m_mutex);
5991 /*
5992 * register a iport, if the port has already been existed
5993 * SCSA will do nothing and just return.
5994 */
5995 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5996 mutex_enter(&mpt->m_mutex);
5997 }
5998 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5999 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
6000 }
6001
6002 /*
6003 * mptsas_handle_dr is a task handler for DR, the DR action includes:
6004 * 1. Directly attched Device Added/Removed.
6005 * 2. Expander Device Added/Removed.
6006 * 3. Indirectly Attached Device Added/Expander.
6007 * 4. LUNs of a existing device status change.
6008 * 5. RAID volume created/deleted.
6009 * 6. Member of RAID volume is released because of RAID deletion.
6010 * 7. Physical disks are removed because of RAID creation.
6011 */
6012 static void
6013 mptsas_handle_dr(void *args) {
6014 mptsas_topo_change_list_t *topo_node = NULL;
6015 mptsas_topo_change_list_t *save_node = NULL;
6016 mptsas_t *mpt;
6017 dev_info_t *parent = NULL;
6018 mptsas_phymask_t phymask = 0;
6019 char *phy_mask_name;
6020 uint8_t flags = 0, physport = 0xff;
6021 uint8_t port_update = 0;
6022 uint_t event;
6023
6024 topo_node = (mptsas_topo_change_list_t *)args;
6025
6026 mpt = topo_node->mpt;
6027 event = topo_node->event;
6028 flags = topo_node->flags;
6029
6030 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6031
6032 NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));
6033
6034 switch (event) {
6035 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6036 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6037 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
6038 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6039 /*
6040 * Direct attached or expander attached device added
6041 * into system or a Phys Disk that is being unhidden.
6042 */
6043 port_update = 1;
6044 }
6045 break;
6046 case MPTSAS_DR_EVENT_RECONFIG_SMP:
6047 /*
6048 * New expander added into system, it must be the head
6049 * of topo_change_list_t
6050 */
6051 port_update = 1;
6052 break;
6053 default:
6054 port_update = 0;
6055 break;
6056 }
6057 /*
6058 * All cases port_update == 1 may cause initiator port form change
6059 */
6060 mutex_enter(&mpt->m_mutex);
6061 if (mpt->m_port_chng && port_update) {
6062 /*
6063 * mpt->m_port_chng flag indicates some PHYs of initiator
6064 * port have changed to online. So when expander added or
6065 * directly attached device online event come, we force to
6066 * update port information by issueing SAS IO Unit Page and
6067 * update PHYMASKs.
6068 */
6069 (void) mptsas_update_phymask(mpt);
6070 mpt->m_port_chng = 0;
6071
6072 }
6073 mutex_exit(&mpt->m_mutex);
6074 while (topo_node) {
6075 phymask = 0;
6076 if (parent == NULL) {
6077 physport = topo_node->un.physport;
6078 event = topo_node->event;
6079 flags = topo_node->flags;
6080 if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
6081 MPTSAS_DR_EVENT_OFFLINE_SMP)) {
6082 /*
6083 * For all offline events, phymask is known
6084 */
6085 phymask = topo_node->un.phymask;
6086 goto find_parent;
6087 }
6088 if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6089 goto handle_topo_change;
6090 }
6091 if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
6092 phymask = topo_node->un.phymask;
6093 goto find_parent;
6094 }
6095
6096 if ((flags ==
6097 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
6098 (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
6099 /*
6100 * There is no any field in IR_CONFIG_CHANGE
6101 * event indicate physport/phynum, let's get
6102 * parent after SAS Device Page0 request.
6103 */
6104 goto handle_topo_change;
6105 }
6106
6107 mutex_enter(&mpt->m_mutex);
6108 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6109 /*
6110 * If the direct attached device added or a
6111 * phys disk is being unhidden, argument
6112 * physport actually is PHY#, so we have to get
6113 * phymask according PHY#.
6114 */
6115 physport = mpt->m_phy_info[physport].port_num;
6116 }
6117
6118 /*
6119 * Translate physport to phymask so that we can search
6120 * parent dip.
6121 */
6122 phymask = mptsas_physport_to_phymask(mpt,
6123 physport);
6124 mutex_exit(&mpt->m_mutex);
6125
6126 find_parent:
6127 bzero(phy_mask_name, MPTSAS_MAX_PHYS);
6128 /*
6129 * For RAID topology change node, write the iport name
6130 * as v0.
6131 */
6132 if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6133 (void) sprintf(phy_mask_name, "v0");
6134 } else {
6135 /*
6136 * phymask can bo 0 if the drive has been
6137 * pulled by the time an add event is
6138 * processed. If phymask is 0, just skip this
6139 * event and continue.
6140 */
6141 if (phymask == 0) {
6142 mutex_enter(&mpt->m_mutex);
6143 save_node = topo_node;
6144 topo_node = topo_node->next;
6145 ASSERT(save_node);
6146 kmem_free(save_node,
6147 sizeof (mptsas_topo_change_list_t));
6148 mutex_exit(&mpt->m_mutex);
6149
6150 parent = NULL;
6151 continue;
6152 }
6153 (void) sprintf(phy_mask_name, "%x", phymask);
6154 }
6155 parent = scsi_hba_iport_find(mpt->m_dip,
6156 phy_mask_name);
6157 if (parent == NULL) {
6158 mptsas_log(mpt, CE_WARN, "Failed to find an "
6159 "iport, should not happen!");
6160 goto out;
6161 }
6162
6163 }
6164 ASSERT(parent);
6165 handle_topo_change:
6166
6167 mutex_enter(&mpt->m_mutex);
6168
6169 mptsas_handle_topo_change(topo_node, parent);
6170 save_node = topo_node;
6171 topo_node = topo_node->next;
6172 ASSERT(save_node);
6173 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
6174 mutex_exit(&mpt->m_mutex);
6175
6176 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6177 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
6178 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
6179 /*
6180 * If direct attached device associated, make sure
6181 * reset the parent before start the next one. But
6182 * all devices associated with expander shares the
6183 * parent. Also, reset parent if this is for RAID.
6184 */
6185 parent = NULL;
6186 }
6187 }
6188 out:
6189 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6190 }
6191
6192 static void
6193 mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
6194 dev_info_t *parent)
6195 {
6196 mptsas_target_t *ptgt = NULL;
6197 mptsas_smp_t *psmp = NULL;
6198 mptsas_t *mpt = (void *)topo_node->mpt;
6199 uint16_t devhdl;
6200 uint16_t attached_devhdl;
6201 uint64_t sas_wwn = 0;
6202 int rval = 0;
6203 uint32_t page_address;
6204 uint8_t phy, flags;
6205 char *addr = NULL;
6206 dev_info_t *lundip;
6207 int circ = 0, circ1 = 0;
6208 char attached_wwnstr[MPTSAS_WWN_STRLEN];
6209
6210 NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));
6211
6212 ASSERT(mutex_owned(&mpt->m_mutex));
6213
6214 switch (topo_node->event) {
6215 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6216 {
6217 char *phy_mask_name;
6218 mptsas_phymask_t phymask = 0;
6219
6220 if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6221 /*
6222 * Get latest RAID info.
6223 */
6224 (void) mptsas_get_raid_info(mpt);
6225 ptgt = mptsas_search_by_devhdl(
6226 &mpt->m_active->m_tgttbl, topo_node->devhdl);
6227 if (ptgt == NULL)
6228 break;
6229 } else {
6230 ptgt = (void *)topo_node->object;
6231 }
6232
6233 if (ptgt == NULL) {
6234 /*
6235 * If a Phys Disk was deleted, RAID info needs to be
6236 * updated to reflect the new topology.
6237 */
6238 (void) mptsas_get_raid_info(mpt);
6239
6240 /*
6241 * Get sas device page 0 by DevHandle to make sure if
6242 * SSP/SATA end device exist.
6243 */
6244 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6245 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
6246 topo_node->devhdl;
6247
6248 rval = mptsas_get_target_device_info(mpt, page_address,
6249 &devhdl, &ptgt);
6250 if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
6251 mptsas_log(mpt, CE_NOTE,
6252 "mptsas_handle_topo_change: target %d is "
6253 "not a SAS/SATA device. \n",
6254 topo_node->devhdl);
6255 } else if (rval == DEV_INFO_FAIL_ALLOC) {
6256 mptsas_log(mpt, CE_NOTE,
6257 "mptsas_handle_topo_change: could not "
6258 "allocate memory. \n");
6259 }
6260 /*
6261 * If rval is DEV_INFO_PHYS_DISK than there is nothing
6262 * else to do, just leave.
6263 */
6264 if (rval != DEV_INFO_SUCCESS) {
6265 return;
6266 }
6267 }
6268
6269 ASSERT(ptgt->m_devhdl == topo_node->devhdl);
6270
6271 mutex_exit(&mpt->m_mutex);
6272 flags = topo_node->flags;
6273
6274 if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
6275 phymask = ptgt->m_phymask;
6276 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6277 (void) sprintf(phy_mask_name, "%x", phymask);
6278 parent = scsi_hba_iport_find(mpt->m_dip,
6279 phy_mask_name);
6280 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6281 if (parent == NULL) {
6282 mptsas_log(mpt, CE_WARN, "Failed to find a "
6283 "iport for PD, should not happen!");
6284 mutex_enter(&mpt->m_mutex);
6285 break;
6286 }
6287 }
6288
6289 if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6290 ndi_devi_enter(parent, &circ1);
6291 (void) mptsas_config_raid(parent, topo_node->devhdl,
6292 &lundip);
6293 ndi_devi_exit(parent, circ1);
6294 } else {
6295 /*
6296 * hold nexus for bus configure
6297 */
6298 ndi_devi_enter(scsi_vhci_dip, &circ);
6299 ndi_devi_enter(parent, &circ1);
6300 rval = mptsas_config_target(parent, ptgt);
6301 /*
6302 * release nexus for bus configure
6303 */
6304 ndi_devi_exit(parent, circ1);
6305 ndi_devi_exit(scsi_vhci_dip, circ);
6306
6307 /*
6308 * Add parent's props for SMHBA support
6309 */
6310 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6311 bzero(attached_wwnstr,
6312 sizeof (attached_wwnstr));
6313 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
6314 ptgt->m_sas_wwn);
6315 if (ddi_prop_update_string(DDI_DEV_T_NONE,
6316 parent,
6317 SCSI_ADDR_PROP_ATTACHED_PORT,
6318 attached_wwnstr)
6319 != DDI_PROP_SUCCESS) {
6320 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6321 parent,
6322 SCSI_ADDR_PROP_ATTACHED_PORT);
6323 mptsas_log(mpt, CE_WARN, "Failed to"
6324 "attached-port props");
6325 return;
6326 }
6327 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6328 MPTSAS_NUM_PHYS, 1) !=
6329 DDI_PROP_SUCCESS) {
6330 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6331 parent, MPTSAS_NUM_PHYS);
6332 mptsas_log(mpt, CE_WARN, "Failed to"
6333 " create num-phys props");
6334 return;
6335 }
6336
6337 /*
6338 * Update PHY info for smhba
6339 */
6340 mutex_enter(&mpt->m_mutex);
6341 if (mptsas_smhba_phy_init(mpt)) {
6342 mutex_exit(&mpt->m_mutex);
6343 mptsas_log(mpt, CE_WARN, "mptsas phy"
6344 " update failed");
6345 return;
6346 }
6347 mutex_exit(&mpt->m_mutex);
6348 mptsas_smhba_set_phy_props(mpt,
6349 ddi_get_name_addr(parent), parent,
6350 1, &attached_devhdl);
6351 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6352 MPTSAS_VIRTUAL_PORT, 0) !=
6353 DDI_PROP_SUCCESS) {
6354 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6355 parent, MPTSAS_VIRTUAL_PORT);
6356 mptsas_log(mpt, CE_WARN,
6357 "mptsas virtual-port"
6358 "port prop update failed");
6359 return;
6360 }
6361 }
6362 }
6363 mutex_enter(&mpt->m_mutex);
6364
6365 NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
6366 "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
6367 ptgt->m_phymask));
6368 break;
6369 }
6370 case MPTSAS_DR_EVENT_OFFLINE_TARGET:
6371 {
6372 mptsas_hash_table_t *tgttbl = &mpt->m_active->m_tgttbl;
6373 devhdl = topo_node->devhdl;
6374 ptgt = mptsas_search_by_devhdl(tgttbl, devhdl);
6375 if (ptgt == NULL)
6376 break;
6377
6378 sas_wwn = ptgt->m_sas_wwn;
6379 phy = ptgt->m_phynum;
6380
6381 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
6382
6383 if (sas_wwn) {
6384 (void) sprintf(addr, "w%016"PRIx64, sas_wwn);
6385 } else {
6386 (void) sprintf(addr, "p%x", phy);
6387 }
6388 ASSERT(ptgt->m_devhdl == devhdl);
6389
6390 if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
6391 (topo_node->flags ==
6392 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6393 /*
6394 * Get latest RAID info if RAID volume status changes
6395 * or Phys Disk status changes
6396 */
6397 (void) mptsas_get_raid_info(mpt);
6398 }
6399 /*
6400 * Abort all outstanding command on the device
6401 */
6402 rval = mptsas_do_scsi_reset(mpt, devhdl);
6403 if (rval) {
6404 NDBG20(("mptsas%d handle_topo_change to reset target "
6405 "before offline devhdl:%x, phymask:%x, rval:%x",
6406 mpt->m_instance, ptgt->m_devhdl, ptgt->m_phymask,
6407 rval));
6408 }
6409
6410 mutex_exit(&mpt->m_mutex);
6411
6412 ndi_devi_enter(scsi_vhci_dip, &circ);
6413 ndi_devi_enter(parent, &circ1);
6414 rval = mptsas_offline_target(parent, addr);
6415 ndi_devi_exit(parent, circ1);
6416 ndi_devi_exit(scsi_vhci_dip, circ);
6417 NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
6418 "phymask:%x, rval:%x", mpt->m_instance,
6419 ptgt->m_devhdl, ptgt->m_phymask, rval));
6420
6421 kmem_free(addr, SCSI_MAXNAMELEN);
6422
6423 /*
6424 * Clear parent's props for SMHBA support
6425 */
6426 flags = topo_node->flags;
6427 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6428 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6429 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6430 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6431 DDI_PROP_SUCCESS) {
6432 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6433 SCSI_ADDR_PROP_ATTACHED_PORT);
6434 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6435 "prop update failed");
6436 break;
6437 }
6438 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6439 MPTSAS_NUM_PHYS, 0) !=
6440 DDI_PROP_SUCCESS) {
6441 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6442 MPTSAS_NUM_PHYS);
6443 mptsas_log(mpt, CE_WARN, "mptsas num phys "
6444 "prop update failed");
6445 break;
6446 }
6447 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6448 MPTSAS_VIRTUAL_PORT, 1) !=
6449 DDI_PROP_SUCCESS) {
6450 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6451 MPTSAS_VIRTUAL_PORT);
6452 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6453 "prop update failed");
6454 break;
6455 }
6456 }
6457
6458 mutex_enter(&mpt->m_mutex);
6459 ptgt->m_led_status = 0;
6460 if (mptsas_flush_led_status(mpt, ptgt) != DDI_SUCCESS) {
6461 NDBG14(("mptsas: clear LED for tgt %x failed",
6462 ptgt->m_slot_num));
6463 }
6464 if (rval == DDI_SUCCESS) {
6465 mptsas_tgt_free(&mpt->m_active->m_tgttbl,
6466 ptgt->m_sas_wwn, ptgt->m_phymask);
6467 ptgt = NULL;
6468 } else {
6469 /*
6470 * clean DR_INTRANSITION flag to allow I/O down to
6471 * PHCI driver since failover finished.
6472 * Invalidate the devhdl
6473 */
6474 mutex_enter(&ptgt->m_tgt_intr_mutex);
6475 ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6476 ptgt->m_tgt_unconfigured = 0;
6477 ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6478 mutex_exit(&ptgt->m_tgt_intr_mutex);
6479 }
6480
6481 /*
6482 * Send SAS IO Unit Control to free the dev handle
6483 */
6484 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6485 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
6486 rval = mptsas_free_devhdl(mpt, devhdl);
6487
6488 NDBG20(("mptsas%d handle_topo_change to remove "
6489 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6490 rval));
6491 }
6492
6493 break;
6494 }
6495 case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
6496 {
6497 devhdl = topo_node->devhdl;
6498 /*
6499 * If this is the remove handle event, do a reset first.
6500 */
6501 if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6502 rval = mptsas_do_scsi_reset(mpt, devhdl);
6503 if (rval) {
6504 NDBG20(("mpt%d reset target before remove "
6505 "devhdl:%x, rval:%x", mpt->m_instance,
6506 devhdl, rval));
6507 }
6508 }
6509
6510 /*
6511 * Send SAS IO Unit Control to free the dev handle
6512 */
6513 rval = mptsas_free_devhdl(mpt, devhdl);
6514 NDBG20(("mptsas%d handle_topo_change to remove "
6515 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6516 rval));
6517 break;
6518 }
6519 case MPTSAS_DR_EVENT_RECONFIG_SMP:
6520 {
6521 mptsas_smp_t smp;
6522 dev_info_t *smpdip;
6523 mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
6524
6525 devhdl = topo_node->devhdl;
6526
6527 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
6528 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
6529 rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
6530 if (rval != DDI_SUCCESS) {
6531 mptsas_log(mpt, CE_WARN, "failed to online smp, "
6532 "handle %x", devhdl);
6533 return;
6534 }
6535
6536 psmp = mptsas_smp_alloc(smptbl, &smp);
6537 if (psmp == NULL) {
6538 return;
6539 }
6540
6541 mutex_exit(&mpt->m_mutex);
6542 ndi_devi_enter(parent, &circ1);
6543 (void) mptsas_online_smp(parent, psmp, &smpdip);
6544 ndi_devi_exit(parent, circ1);
6545
6546 mutex_enter(&mpt->m_mutex);
6547 break;
6548 }
6549 case MPTSAS_DR_EVENT_OFFLINE_SMP:
6550 {
6551 mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
6552 devhdl = topo_node->devhdl;
6553 uint32_t dev_info;
6554
6555 psmp = mptsas_search_by_devhdl(smptbl, devhdl);
6556 if (psmp == NULL)
6557 break;
6558 /*
6559 * The mptsas_smp_t data is released only if the dip is offlined
6560 * successfully.
6561 */
6562 mutex_exit(&mpt->m_mutex);
6563
6564 ndi_devi_enter(parent, &circ1);
6565 rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
6566 ndi_devi_exit(parent, circ1);
6567
6568 dev_info = psmp->m_deviceinfo;
6569 if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
6570 DEVINFO_DIRECT_ATTACHED) {
6571 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6572 MPTSAS_VIRTUAL_PORT, 1) !=
6573 DDI_PROP_SUCCESS) {
6574 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6575 MPTSAS_VIRTUAL_PORT);
6576 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6577 "prop update failed");
6578 return;
6579 }
6580 /*
6581 * Check whether the smp connected to the iport,
6582 */
6583 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6584 MPTSAS_NUM_PHYS, 0) !=
6585 DDI_PROP_SUCCESS) {
6586 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6587 MPTSAS_NUM_PHYS);
6588 mptsas_log(mpt, CE_WARN, "mptsas num phys"
6589 "prop update failed");
6590 return;
6591 }
6592 /*
6593 * Clear parent's attached-port props
6594 */
6595 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6596 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6597 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6598 DDI_PROP_SUCCESS) {
6599 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6600 SCSI_ADDR_PROP_ATTACHED_PORT);
6601 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6602 "prop update failed");
6603 return;
6604 }
6605 }
6606
6607 mutex_enter(&mpt->m_mutex);
6608 NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
6609 "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
6610 if (rval == DDI_SUCCESS) {
6611 mptsas_smp_free(smptbl, psmp->m_sasaddr,
6612 psmp->m_phymask);
6613 } else {
6614 psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
6615 }
6616
6617 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6618
6619 break;
6620 }
6621 default:
6622 return;
6623 }
6624 }
6625
6626 /*
6627 * Record the event if its type is enabled in mpt instance by ioctl.
6628 */
6629 static void
6630 mptsas_record_event(void *args)
6631 {
6632 m_replyh_arg_t *replyh_arg;
6633 pMpi2EventNotificationReply_t eventreply;
6634 uint32_t event, rfm;
6635 mptsas_t *mpt;
6636 int i, j;
6637 uint16_t event_data_len;
6638 boolean_t sendAEN = FALSE;
6639
6640 replyh_arg = (m_replyh_arg_t *)args;
6641 rfm = replyh_arg->rfm;
6642 mpt = replyh_arg->mpt;
6643
6644 eventreply = (pMpi2EventNotificationReply_t)
6645 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6646 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6647
6648
6649 /*
6650 * Generate a system event to let anyone who cares know that a
6651 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6652 * event mask is set to.
6653 */
6654 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6655 sendAEN = TRUE;
6656 }
6657
6658 /*
6659 * Record the event only if it is not masked. Determine which dword
6660 * and bit of event mask to test.
6661 */
6662 i = (uint8_t)(event / 32);
6663 j = (uint8_t)(event % 32);
6664 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6665 i = mpt->m_event_index;
6666 mpt->m_events[i].Type = event;
6667 mpt->m_events[i].Number = ++mpt->m_event_number;
6668 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6669 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6670 &eventreply->EventDataLength);
6671
6672 if (event_data_len > 0) {
6673 /*
6674 * Limit data to size in m_event entry
6675 */
6676 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6677 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6678 }
6679 for (j = 0; j < event_data_len; j++) {
6680 mpt->m_events[i].Data[j] =
6681 ddi_get32(mpt->m_acc_reply_frame_hdl,
6682 &(eventreply->EventData[j]));
6683 }
6684
6685 /*
6686 * check for index wrap-around
6687 */
6688 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6689 i = 0;
6690 }
6691 mpt->m_event_index = (uint8_t)i;
6692
6693 /*
6694 * Set flag to send the event.
6695 */
6696 sendAEN = TRUE;
6697 }
6698 }
6699
6700 /*
6701 * Generate a system event if flag is set to let anyone who cares know
6702 * that an event has occurred.
6703 */
6704 if (sendAEN) {
6705 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6706 "SAS", NULL, NULL, DDI_NOSLEEP);
6707 }
6708 }
6709
6710 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6711 /*
6712 * handle sync events from ioc in interrupt
6713 * return value:
6714 * DDI_SUCCESS: The event is handled by this func
6715 * DDI_FAILURE: Event is not handled
6716 */
6717 static int
6718 mptsas_handle_event_sync(void *args)
6719 {
6720 m_replyh_arg_t *replyh_arg;
6721 pMpi2EventNotificationReply_t eventreply;
6722 uint32_t event, rfm;
6723 mptsas_t *mpt;
6724 uint_t iocstatus;
6725
6726 replyh_arg = (m_replyh_arg_t *)args;
6727 rfm = replyh_arg->rfm;
6728 mpt = replyh_arg->mpt;
6729
6730 ASSERT(mutex_owned(&mpt->m_mutex));
6731
6732 eventreply = (pMpi2EventNotificationReply_t)
6733 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6734 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6735
6736 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6737 &eventreply->IOCStatus)) {
6738 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6739 mptsas_log(mpt, CE_WARN,
6740 "!mptsas_handle_event_sync: IOCStatus=0x%x, "
6741 "IOCLogInfo=0x%x", iocstatus,
6742 ddi_get32(mpt->m_acc_reply_frame_hdl,
6743 &eventreply->IOCLogInfo));
6744 } else {
6745 mptsas_log(mpt, CE_WARN,
6746 "mptsas_handle_event_sync: IOCStatus=0x%x, "
6747 "IOCLogInfo=0x%x", iocstatus,
6748 ddi_get32(mpt->m_acc_reply_frame_hdl,
6749 &eventreply->IOCLogInfo));
6750 }
6751 }
6752
6753 /*
6754 * figure out what kind of event we got and handle accordingly
6755 */
6756 switch (event) {
6757 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6758 {
6759 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
6760 uint8_t num_entries, expstatus, phy;
6761 uint8_t phystatus, physport, state, i;
6762 uint8_t start_phy_num, link_rate;
6763 uint16_t dev_handle, reason_code;
6764 uint16_t enc_handle, expd_handle;
6765 char string[80], curr[80], prev[80];
6766 mptsas_topo_change_list_t *topo_head = NULL;
6767 mptsas_topo_change_list_t *topo_tail = NULL;
6768 mptsas_topo_change_list_t *topo_node = NULL;
6769 mptsas_target_t *ptgt;
6770 mptsas_smp_t *psmp;
6771 mptsas_hash_table_t *tgttbl, *smptbl;
6772 uint8_t flags = 0, exp_flag;
6773 smhba_info_t *pSmhba = NULL;
6774
6775 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6776
6777 tgttbl = &mpt->m_active->m_tgttbl;
6778 smptbl = &mpt->m_active->m_smptbl;
6779
6780 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6781 eventreply->EventData;
6782
6783 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6784 &sas_topo_change_list->EnclosureHandle);
6785 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6786 &sas_topo_change_list->ExpanderDevHandle);
6787 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6788 &sas_topo_change_list->NumEntries);
6789 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6790 &sas_topo_change_list->StartPhyNum);
6791 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6792 &sas_topo_change_list->ExpStatus);
6793 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6794 &sas_topo_change_list->PhysicalPort);
6795
6796 string[0] = 0;
6797 if (expd_handle) {
6798 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6799 switch (expstatus) {
6800 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6801 (void) sprintf(string, " added");
6802 /*
6803 * New expander device added
6804 */
6805 mpt->m_port_chng = 1;
6806 topo_node = kmem_zalloc(
6807 sizeof (mptsas_topo_change_list_t),
6808 KM_SLEEP);
6809 topo_node->mpt = mpt;
6810 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6811 topo_node->un.physport = physport;
6812 topo_node->devhdl = expd_handle;
6813 topo_node->flags = flags;
6814 topo_node->object = NULL;
6815 if (topo_head == NULL) {
6816 topo_head = topo_tail = topo_node;
6817 } else {
6818 topo_tail->next = topo_node;
6819 topo_tail = topo_node;
6820 }
6821 break;
6822 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6823 (void) sprintf(string, " not responding, "
6824 "removed");
6825 psmp = mptsas_search_by_devhdl(smptbl,
6826 expd_handle);
6827 if (psmp == NULL)
6828 break;
6829
6830 topo_node = kmem_zalloc(
6831 sizeof (mptsas_topo_change_list_t),
6832 KM_SLEEP);
6833 topo_node->mpt = mpt;
6834 topo_node->un.phymask = psmp->m_phymask;
6835 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6836 topo_node->devhdl = expd_handle;
6837 topo_node->flags = flags;
6838 topo_node->object = NULL;
6839 if (topo_head == NULL) {
6840 topo_head = topo_tail = topo_node;
6841 } else {
6842 topo_tail->next = topo_node;
6843 topo_tail = topo_node;
6844 }
6845 break;
6846 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6847 break;
6848 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6849 (void) sprintf(string, " not responding, "
6850 "delaying removal");
6851 break;
6852 default:
6853 break;
6854 }
6855 } else {
6856 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6857 }
6858
6859 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6860 enc_handle, expd_handle, string));
6861 for (i = 0; i < num_entries; i++) {
6862 phy = i + start_phy_num;
6863 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6864 &sas_topo_change_list->PHY[i].PhyStatus);
6865 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6866 &sas_topo_change_list->PHY[i].AttachedDevHandle);
6867 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
6868 /*
6869 * Filter out processing of Phy Vacant Status unless
6870 * the reason code is "Not Responding". Process all
6871 * other combinations of Phy Status and Reason Codes.
6872 */
6873 if ((phystatus &
6874 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
6875 (reason_code !=
6876 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
6877 continue;
6878 }
6879 curr[0] = 0;
6880 prev[0] = 0;
6881 string[0] = 0;
6882 switch (reason_code) {
6883 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6884 {
6885 NDBG20(("mptsas%d phy %d physical_port %d "
6886 "dev_handle %d added", mpt->m_instance, phy,
6887 physport, dev_handle));
6888 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6889 &sas_topo_change_list->PHY[i].LinkRate);
6890 state = (link_rate &
6891 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6892 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6893 switch (state) {
6894 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6895 (void) sprintf(curr, "is disabled");
6896 break;
6897 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6898 (void) sprintf(curr, "is offline, "
6899 "failed speed negotiation");
6900 break;
6901 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6902 (void) sprintf(curr, "SATA OOB "
6903 "complete");
6904 break;
6905 case SMP_RESET_IN_PROGRESS:
6906 (void) sprintf(curr, "SMP reset in "
6907 "progress");
6908 break;
6909 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6910 (void) sprintf(curr, "is online at "
6911 "1.5 Gbps");
6912 break;
6913 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6914 (void) sprintf(curr, "is online at 3.0 "
6915 "Gbps");
6916 break;
6917 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6918 (void) sprintf(curr, "is online at 6.0 "
6919 "Gbps");
6920 break;
6921 default:
6922 (void) sprintf(curr, "state is "
6923 "unknown");
6924 break;
6925 }
6926 /*
6927 * New target device added into the system.
6928 * Set association flag according to if an
6929 * expander is used or not.
6930 */
6931 exp_flag =
6932 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6933 if (flags ==
6934 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6935 flags = exp_flag;
6936 }
6937 topo_node = kmem_zalloc(
6938 sizeof (mptsas_topo_change_list_t),
6939 KM_SLEEP);
6940 topo_node->mpt = mpt;
6941 topo_node->event =
6942 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6943 if (expd_handle == 0) {
6944 /*
6945 * Per MPI 2, if expander dev handle
6946 * is 0, it's a directly attached
6947 * device. So driver use PHY to decide
6948 * which iport is associated
6949 */
6950 physport = phy;
6951 mpt->m_port_chng = 1;
6952 }
6953 topo_node->un.physport = physport;
6954 topo_node->devhdl = dev_handle;
6955 topo_node->flags = flags;
6956 topo_node->object = NULL;
6957 if (topo_head == NULL) {
6958 topo_head = topo_tail = topo_node;
6959 } else {
6960 topo_tail->next = topo_node;
6961 topo_tail = topo_node;
6962 }
6963 break;
6964 }
6965 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6966 {
6967 NDBG20(("mptsas%d phy %d physical_port %d "
6968 "dev_handle %d removed", mpt->m_instance,
6969 phy, physport, dev_handle));
6970 /*
6971 * Set association flag according to if an
6972 * expander is used or not.
6973 */
6974 exp_flag =
6975 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6976 if (flags ==
6977 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6978 flags = exp_flag;
6979 }
6980 /*
6981 * Target device is removed from the system
6982 * Before the device is really offline from
6983 * from system.
6984 */
6985 ptgt = mptsas_search_by_devhdl(tgttbl,
6986 dev_handle);
6987 /*
6988 * If ptgt is NULL here, it means that the
6989 * DevHandle is not in the hash table. This is
6990 * reasonable sometimes. For example, if a
6991 * disk was pulled, then added, then pulled
6992 * again, the disk will not have been put into
6993 * the hash table because the add event will
6994 * have an invalid phymask. BUT, this does not
6995 * mean that the DevHandle is invalid. The
6996 * controller will still have a valid DevHandle
6997 * that must be removed. To do this, use the
6998 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
6999 */
7000 if (ptgt == NULL) {
7001 topo_node = kmem_zalloc(
7002 sizeof (mptsas_topo_change_list_t),
7003 KM_SLEEP);
7004 topo_node->mpt = mpt;
7005 topo_node->un.phymask = 0;
7006 topo_node->event =
7007 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7008 topo_node->devhdl = dev_handle;
7009 topo_node->flags = flags;
7010 topo_node->object = NULL;
7011 if (topo_head == NULL) {
7012 topo_head = topo_tail =
7013 topo_node;
7014 } else {
7015 topo_tail->next = topo_node;
7016 topo_tail = topo_node;
7017 }
7018 break;
7019 }
7020
7021 /*
7022 * Update DR flag immediately avoid I/O failure
7023 * before failover finish. Pay attention to the
7024 * mutex protect, we need grab the per target
7025 * mutex during set m_dr_flag because the
7026 * m_mutex would not be held all the time in
7027 * mptsas_scsi_start().
7028 */
7029 mutex_enter(&ptgt->m_tgt_intr_mutex);
7030 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7031 mutex_exit(&ptgt->m_tgt_intr_mutex);
7032
7033 topo_node = kmem_zalloc(
7034 sizeof (mptsas_topo_change_list_t),
7035 KM_SLEEP);
7036 topo_node->mpt = mpt;
7037 topo_node->un.phymask = ptgt->m_phymask;
7038 topo_node->event =
7039 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7040 topo_node->devhdl = dev_handle;
7041 topo_node->flags = flags;
7042 topo_node->object = NULL;
7043 if (topo_head == NULL) {
7044 topo_head = topo_tail = topo_node;
7045 } else {
7046 topo_tail->next = topo_node;
7047 topo_tail = topo_node;
7048 }
7049 break;
7050 }
7051 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7052 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7053 &sas_topo_change_list->PHY[i].LinkRate);
7054 state = (link_rate &
7055 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7056 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7057 pSmhba = &mpt->m_phy_info[i].smhba_info;
7058 pSmhba->negotiated_link_rate = state;
7059 switch (state) {
7060 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7061 (void) sprintf(curr, "is disabled");
7062 mptsas_smhba_log_sysevent(mpt,
7063 ESC_SAS_PHY_EVENT,
7064 SAS_PHY_REMOVE,
7065 &mpt->m_phy_info[i].smhba_info);
7066 mpt->m_phy_info[i].smhba_info.
7067 negotiated_link_rate
7068 = 0x1;
7069 break;
7070 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7071 (void) sprintf(curr, "is offline, "
7072 "failed speed negotiation");
7073 mptsas_smhba_log_sysevent(mpt,
7074 ESC_SAS_PHY_EVENT,
7075 SAS_PHY_OFFLINE,
7076 &mpt->m_phy_info[i].smhba_info);
7077 break;
7078 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7079 (void) sprintf(curr, "SATA OOB "
7080 "complete");
7081 break;
7082 case SMP_RESET_IN_PROGRESS:
7083 (void) sprintf(curr, "SMP reset in "
7084 "progress");
7085 break;
7086 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7087 (void) sprintf(curr, "is online at "
7088 "1.5 Gbps");
7089 if ((expd_handle == 0) &&
7090 (enc_handle == 1)) {
7091 mpt->m_port_chng = 1;
7092 }
7093 mptsas_smhba_log_sysevent(mpt,
7094 ESC_SAS_PHY_EVENT,
7095 SAS_PHY_ONLINE,
7096 &mpt->m_phy_info[i].smhba_info);
7097 break;
7098 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7099 (void) sprintf(curr, "is online at 3.0 "
7100 "Gbps");
7101 if ((expd_handle == 0) &&
7102 (enc_handle == 1)) {
7103 mpt->m_port_chng = 1;
7104 }
7105 mptsas_smhba_log_sysevent(mpt,
7106 ESC_SAS_PHY_EVENT,
7107 SAS_PHY_ONLINE,
7108 &mpt->m_phy_info[i].smhba_info);
7109 break;
7110 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7111 (void) sprintf(curr, "is online at "
7112 "6.0 Gbps");
7113 if ((expd_handle == 0) &&
7114 (enc_handle == 1)) {
7115 mpt->m_port_chng = 1;
7116 }
7117 mptsas_smhba_log_sysevent(mpt,
7118 ESC_SAS_PHY_EVENT,
7119 SAS_PHY_ONLINE,
7120 &mpt->m_phy_info[i].smhba_info);
7121 break;
7122 default:
7123 (void) sprintf(curr, "state is "
7124 "unknown");
7125 break;
7126 }
7127
7128 state = (link_rate &
7129 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
7130 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
7131 switch (state) {
7132 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7133 (void) sprintf(prev, ", was disabled");
7134 break;
7135 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7136 (void) sprintf(prev, ", was offline, "
7137 "failed speed negotiation");
7138 break;
7139 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7140 (void) sprintf(prev, ", was SATA OOB "
7141 "complete");
7142 break;
7143 case SMP_RESET_IN_PROGRESS:
7144 (void) sprintf(prev, ", was SMP reset "
7145 "in progress");
7146 break;
7147 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7148 (void) sprintf(prev, ", was online at "
7149 "1.5 Gbps");
7150 break;
7151 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7152 (void) sprintf(prev, ", was online at "
7153 "3.0 Gbps");
7154 break;
7155 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7156 (void) sprintf(prev, ", was online at "
7157 "6.0 Gbps");
7158 break;
7159 default:
7160 break;
7161 }
7162 (void) sprintf(&string[strlen(string)], "link "
7163 "changed, ");
7164 break;
7165 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7166 continue;
7167 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7168 (void) sprintf(&string[strlen(string)],
7169 "target not responding, delaying "
7170 "removal");
7171 break;
7172 }
7173 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7174 mpt->m_instance, phy, dev_handle, string, curr,
7175 prev));
7176 }
7177 if (topo_head != NULL) {
7178 /*
7179 * Launch DR taskq to handle topology change
7180 */
7181 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7182 mptsas_handle_dr, (void *)topo_head,
7183 DDI_NOSLEEP)) != DDI_SUCCESS) {
7184 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7185 "for handle SAS DR event failed. \n");
7186 }
7187 }
7188 break;
7189 }
7190 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7191 {
7192 Mpi2EventDataIrConfigChangeList_t *irChangeList;
7193 mptsas_topo_change_list_t *topo_head = NULL;
7194 mptsas_topo_change_list_t *topo_tail = NULL;
7195 mptsas_topo_change_list_t *topo_node = NULL;
7196 mptsas_target_t *ptgt;
7197 mptsas_hash_table_t *tgttbl;
7198 uint8_t num_entries, i, reason;
7199 uint16_t volhandle, diskhandle;
7200
7201 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7202 eventreply->EventData;
7203 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7204 &irChangeList->NumElements);
7205
7206 tgttbl = &mpt->m_active->m_tgttbl;
7207
7208 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7209 mpt->m_instance));
7210
7211 for (i = 0; i < num_entries; i++) {
7212 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7213 &irChangeList->ConfigElement[i].ReasonCode);
7214 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7215 &irChangeList->ConfigElement[i].VolDevHandle);
7216 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7217 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7218
7219 switch (reason) {
7220 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7221 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7222 {
7223 NDBG20(("mptsas %d volume added\n",
7224 mpt->m_instance));
7225
7226 topo_node = kmem_zalloc(
7227 sizeof (mptsas_topo_change_list_t),
7228 KM_SLEEP);
7229
7230 topo_node->mpt = mpt;
7231 topo_node->event =
7232 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7233 topo_node->un.physport = 0xff;
7234 topo_node->devhdl = volhandle;
7235 topo_node->flags =
7236 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7237 topo_node->object = NULL;
7238 if (topo_head == NULL) {
7239 topo_head = topo_tail = topo_node;
7240 } else {
7241 topo_tail->next = topo_node;
7242 topo_tail = topo_node;
7243 }
7244 break;
7245 }
7246 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7247 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7248 {
7249 NDBG20(("mptsas %d volume deleted\n",
7250 mpt->m_instance));
7251 ptgt = mptsas_search_by_devhdl(tgttbl,
7252 volhandle);
7253 if (ptgt == NULL)
7254 break;
7255
7256 /*
7257 * Clear any flags related to volume
7258 */
7259 (void) mptsas_delete_volume(mpt, volhandle);
7260
7261 /*
7262 * Update DR flag immediately avoid I/O failure
7263 */
7264 mutex_enter(&ptgt->m_tgt_intr_mutex);
7265 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7266 mutex_exit(&ptgt->m_tgt_intr_mutex);
7267
7268 topo_node = kmem_zalloc(
7269 sizeof (mptsas_topo_change_list_t),
7270 KM_SLEEP);
7271 topo_node->mpt = mpt;
7272 topo_node->un.phymask = ptgt->m_phymask;
7273 topo_node->event =
7274 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7275 topo_node->devhdl = volhandle;
7276 topo_node->flags =
7277 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7278 topo_node->object = (void *)ptgt;
7279 if (topo_head == NULL) {
7280 topo_head = topo_tail = topo_node;
7281 } else {
7282 topo_tail->next = topo_node;
7283 topo_tail = topo_node;
7284 }
7285 break;
7286 }
7287 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7288 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7289 {
7290 ptgt = mptsas_search_by_devhdl(tgttbl,
7291 diskhandle);
7292 if (ptgt == NULL)
7293 break;
7294
7295 /*
7296 * Update DR flag immediately avoid I/O failure
7297 */
7298 mutex_enter(&ptgt->m_tgt_intr_mutex);
7299 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7300 mutex_exit(&ptgt->m_tgt_intr_mutex);
7301
7302 topo_node = kmem_zalloc(
7303 sizeof (mptsas_topo_change_list_t),
7304 KM_SLEEP);
7305 topo_node->mpt = mpt;
7306 topo_node->un.phymask = ptgt->m_phymask;
7307 topo_node->event =
7308 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7309 topo_node->devhdl = diskhandle;
7310 topo_node->flags =
7311 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7312 topo_node->object = (void *)ptgt;
7313 if (topo_head == NULL) {
7314 topo_head = topo_tail = topo_node;
7315 } else {
7316 topo_tail->next = topo_node;
7317 topo_tail = topo_node;
7318 }
7319 break;
7320 }
7321 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7322 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7323 {
7324 /*
7325 * The physical drive is released by a IR
7326 * volume. But we cannot get the the physport
7327 * or phynum from the event data, so we only
7328 * can get the physport/phynum after SAS
7329 * Device Page0 request for the devhdl.
7330 */
7331 topo_node = kmem_zalloc(
7332 sizeof (mptsas_topo_change_list_t),
7333 KM_SLEEP);
7334 topo_node->mpt = mpt;
7335 topo_node->un.phymask = 0;
7336 topo_node->event =
7337 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7338 topo_node->devhdl = diskhandle;
7339 topo_node->flags =
7340 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7341 topo_node->object = NULL;
7342 mpt->m_port_chng = 1;
7343 if (topo_head == NULL) {
7344 topo_head = topo_tail = topo_node;
7345 } else {
7346 topo_tail->next = topo_node;
7347 topo_tail = topo_node;
7348 }
7349 break;
7350 }
7351 default:
7352 break;
7353 }
7354 }
7355
7356 if (topo_head != NULL) {
7357 /*
7358 * Launch DR taskq to handle topology change
7359 */
7360 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7361 mptsas_handle_dr, (void *)topo_head,
7362 DDI_NOSLEEP)) != DDI_SUCCESS) {
7363 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7364 "for handle SAS DR event failed. \n");
7365 }
7366 }
7367 break;
7368 }
7369 default:
7370 return (DDI_FAILURE);
7371 }
7372
7373 return (DDI_SUCCESS);
7374 }
7375
7376 /*
7377 * handle events from ioc
7378 */
7379 static void
7380 mptsas_handle_event(void *args)
7381 {
7382 m_replyh_arg_t *replyh_arg;
7383 pMpi2EventNotificationReply_t eventreply;
7384 uint32_t event, iocloginfo, rfm;
7385 uint32_t status;
7386 uint8_t port;
7387 mptsas_t *mpt;
7388 uint_t iocstatus;
7389
7390 replyh_arg = (m_replyh_arg_t *)args;
7391 rfm = replyh_arg->rfm;
7392 mpt = replyh_arg->mpt;
7393
7394 mutex_enter(&mpt->m_mutex);
7395
7396 eventreply = (pMpi2EventNotificationReply_t)
7397 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7398 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7399
7400 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7401 &eventreply->IOCStatus)) {
7402 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7403 mptsas_log(mpt, CE_WARN,
7404 "!mptsas_handle_event: IOCStatus=0x%x, "
7405 "IOCLogInfo=0x%x", iocstatus,
7406 ddi_get32(mpt->m_acc_reply_frame_hdl,
7407 &eventreply->IOCLogInfo));
7408 } else {
7409 mptsas_log(mpt, CE_WARN,
7410 "mptsas_handle_event: IOCStatus=0x%x, "
7411 "IOCLogInfo=0x%x", iocstatus,
7412 ddi_get32(mpt->m_acc_reply_frame_hdl,
7413 &eventreply->IOCLogInfo));
7414 }
7415 }
7416
7417 /*
7418 * figure out what kind of event we got and handle accordingly
7419 */
7420 switch (event) {
7421 case MPI2_EVENT_LOG_ENTRY_ADDED:
7422 break;
7423 case MPI2_EVENT_LOG_DATA:
7424 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7425 &eventreply->IOCLogInfo);
7426 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7427 iocloginfo));
7428 break;
7429 case MPI2_EVENT_STATE_CHANGE:
7430 NDBG20(("mptsas%d state change.", mpt->m_instance));
7431 break;
7432 case MPI2_EVENT_HARD_RESET_RECEIVED:
7433 NDBG20(("mptsas%d event change.", mpt->m_instance));
7434 break;
7435 case MPI2_EVENT_SAS_DISCOVERY:
7436 {
7437 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7438 char string[80];
7439 uint8_t rc;
7440
7441 sasdiscovery =
7442 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7443
7444 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7445 &sasdiscovery->ReasonCode);
7446 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7447 &sasdiscovery->PhysicalPort);
7448 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7449 &sasdiscovery->DiscoveryStatus);
7450
7451 string[0] = 0;
7452 switch (rc) {
7453 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7454 (void) sprintf(string, "STARTING");
7455 break;
7456 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7457 (void) sprintf(string, "COMPLETED");
7458 break;
7459 default:
7460 (void) sprintf(string, "UNKNOWN");
7461 break;
7462 }
7463
7464 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7465 port, status));
7466
7467 break;
7468 }
7469 case MPI2_EVENT_EVENT_CHANGE:
7470 NDBG20(("mptsas%d event change.", mpt->m_instance));
7471 break;
7472 case MPI2_EVENT_TASK_SET_FULL:
7473 {
7474 pMpi2EventDataTaskSetFull_t taskfull;
7475
7476 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7477
7478 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7479 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7480 &taskfull->CurrentDepth)));
7481 break;
7482 }
7483 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7484 {
7485 /*
7486 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7487 * in mptsas_handle_event_sync() of interrupt context
7488 */
7489 break;
7490 }
7491 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7492 {
7493 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7494 uint8_t rc;
7495 char string[80];
7496
7497 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7498 eventreply->EventData;
7499
7500 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7501 &encstatus->ReasonCode);
7502 switch (rc) {
7503 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7504 (void) sprintf(string, "added");
7505 break;
7506 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7507 (void) sprintf(string, ", not responding");
7508 break;
7509 default:
7510 break;
7511 }
7512 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7513 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7514 &encstatus->EnclosureHandle), string));
7515 break;
7516 }
7517
7518 /*
7519 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7520 * mptsas_handle_event_sync,in here just send ack message.
7521 */
7522 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7523 {
7524 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7525 uint8_t rc;
7526 uint16_t devhdl;
7527 uint64_t wwn = 0;
7528 uint32_t wwn_lo, wwn_hi;
7529
7530 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7531 eventreply->EventData;
7532 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7533 &statuschange->ReasonCode);
7534 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7535 (uint32_t *)(void *)&statuschange->SASAddress);
7536 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7537 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7538 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7539 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7540 &statuschange->DevHandle);
7541
7542 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7543 wwn));
7544
7545 switch (rc) {
7546 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7547 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7548 ddi_get8(mpt->m_acc_reply_frame_hdl,
7549 &statuschange->ASC),
7550 ddi_get8(mpt->m_acc_reply_frame_hdl,
7551 &statuschange->ASCQ)));
7552 break;
7553
7554 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7555 NDBG20(("Device not supported"));
7556 break;
7557
7558 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7559 NDBG20(("IOC internally generated the Target Reset "
7560 "for devhdl:%x", devhdl));
7561 break;
7562
7563 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7564 NDBG20(("IOC's internally generated Target Reset "
7565 "completed for devhdl:%x", devhdl));
7566 break;
7567
7568 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7569 NDBG20(("IOC internally generated Abort Task"));
7570 break;
7571
7572 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7573 NDBG20(("IOC's internally generated Abort Task "
7574 "completed"));
7575 break;
7576
7577 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7578 NDBG20(("IOC internally generated Abort Task Set"));
7579 break;
7580
7581 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7582 NDBG20(("IOC internally generated Clear Task Set"));
7583 break;
7584
7585 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7586 NDBG20(("IOC internally generated Query Task"));
7587 break;
7588
7589 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7590 NDBG20(("Device sent an Asynchronous Notification"));
7591 break;
7592
7593 default:
7594 break;
7595 }
7596 break;
7597 }
7598 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7599 {
7600 /*
7601 * IR TOPOLOGY CHANGE LIST Event has already been handled
7602 * in mpt_handle_event_sync() of interrupt context
7603 */
7604 break;
7605 }
7606 case MPI2_EVENT_IR_OPERATION_STATUS:
7607 {
7608 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7609 char reason_str[80];
7610 uint8_t rc, percent;
7611 uint16_t handle;
7612
7613 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7614 eventreply->EventData;
7615 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7616 &irOpStatus->RAIDOperation);
7617 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7618 &irOpStatus->PercentComplete);
7619 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7620 &irOpStatus->VolDevHandle);
7621
7622 switch (rc) {
7623 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7624 (void) sprintf(reason_str, "resync");
7625 break;
7626 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7627 (void) sprintf(reason_str, "online capacity "
7628 "expansion");
7629 break;
7630 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7631 (void) sprintf(reason_str, "consistency check");
7632 break;
7633 default:
7634 (void) sprintf(reason_str, "unknown reason %x",
7635 rc);
7636 }
7637
7638 NDBG20(("mptsas%d raid operational status: (%s)"
7639 "\thandle(0x%04x), percent complete(%d)\n",
7640 mpt->m_instance, reason_str, handle, percent));
7641 break;
7642 }
7643 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7644 {
7645 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7646 uint8_t phy_num;
7647 uint8_t primitive;
7648
7649 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7650 eventreply->EventData;
7651
7652 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7653 &sas_broadcast->PhyNum);
7654 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7655 &sas_broadcast->Primitive);
7656
7657 switch (primitive) {
7658 case MPI2_EVENT_PRIMITIVE_CHANGE:
7659 mptsas_smhba_log_sysevent(mpt,
7660 ESC_SAS_HBA_PORT_BROADCAST,
7661 SAS_PORT_BROADCAST_CHANGE,
7662 &mpt->m_phy_info[phy_num].smhba_info);
7663 break;
7664 case MPI2_EVENT_PRIMITIVE_SES:
7665 mptsas_smhba_log_sysevent(mpt,
7666 ESC_SAS_HBA_PORT_BROADCAST,
7667 SAS_PORT_BROADCAST_SES,
7668 &mpt->m_phy_info[phy_num].smhba_info);
7669 break;
7670 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7671 mptsas_smhba_log_sysevent(mpt,
7672 ESC_SAS_HBA_PORT_BROADCAST,
7673 SAS_PORT_BROADCAST_D01_4,
7674 &mpt->m_phy_info[phy_num].smhba_info);
7675 break;
7676 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7677 mptsas_smhba_log_sysevent(mpt,
7678 ESC_SAS_HBA_PORT_BROADCAST,
7679 SAS_PORT_BROADCAST_D04_7,
7680 &mpt->m_phy_info[phy_num].smhba_info);
7681 break;
7682 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7683 mptsas_smhba_log_sysevent(mpt,
7684 ESC_SAS_HBA_PORT_BROADCAST,
7685 SAS_PORT_BROADCAST_D16_7,
7686 &mpt->m_phy_info[phy_num].smhba_info);
7687 break;
7688 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7689 mptsas_smhba_log_sysevent(mpt,
7690 ESC_SAS_HBA_PORT_BROADCAST,
7691 SAS_PORT_BROADCAST_D29_7,
7692 &mpt->m_phy_info[phy_num].smhba_info);
7693 break;
7694 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7695 mptsas_smhba_log_sysevent(mpt,
7696 ESC_SAS_HBA_PORT_BROADCAST,
7697 SAS_PORT_BROADCAST_D24_0,
7698 &mpt->m_phy_info[phy_num].smhba_info);
7699 break;
7700 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7701 mptsas_smhba_log_sysevent(mpt,
7702 ESC_SAS_HBA_PORT_BROADCAST,
7703 SAS_PORT_BROADCAST_D27_4,
7704 &mpt->m_phy_info[phy_num].smhba_info);
7705 break;
7706 default:
7707 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7708 " %x received",
7709 mpt->m_instance, primitive));
7710 break;
7711 }
7712 NDBG20(("mptsas%d sas broadcast primitive: "
7713 "\tprimitive(0x%04x), phy(%d) complete\n",
7714 mpt->m_instance, primitive, phy_num));
7715 break;
7716 }
7717 case MPI2_EVENT_IR_VOLUME:
7718 {
7719 Mpi2EventDataIrVolume_t *irVolume;
7720 uint16_t devhandle;
7721 uint32_t state;
7722 int config, vol;
7723 mptsas_slots_t *slots = mpt->m_active;
7724 uint8_t found = FALSE;
7725
7726 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7727 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7728 &irVolume->NewValue);
7729 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7730 &irVolume->VolDevHandle);
7731
7732 NDBG20(("EVENT_IR_VOLUME event is received"));
7733
7734 /*
7735 * Get latest RAID info and then find the DevHandle for this
7736 * event in the configuration. If the DevHandle is not found
7737 * just exit the event.
7738 */
7739 (void) mptsas_get_raid_info(mpt);
7740 for (config = 0; (config < slots->m_num_raid_configs) &&
7741 (!found); config++) {
7742 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7743 if (slots->m_raidconfig[config].m_raidvol[vol].
7744 m_raidhandle == devhandle) {
7745 found = TRUE;
7746 break;
7747 }
7748 }
7749 }
7750 if (!found) {
7751 break;
7752 }
7753
7754 switch (irVolume->ReasonCode) {
7755 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7756 {
7757 uint32_t i;
7758 slots->m_raidconfig[config].m_raidvol[vol].m_settings =
7759 state;
7760
7761 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7762 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7763 ", auto-config of hot-swap drives is %s"
7764 ", write caching is %s"
7765 ", hot-spare pool mask is %02x\n",
7766 vol, state &
7767 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7768 ? "disabled" : "enabled",
7769 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7770 ? "controlled by member disks" :
7771 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7772 ? "disabled" :
7773 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7774 ? "enabled" :
7775 "incorrectly set",
7776 (state >> 16) & 0xff);
7777 break;
7778 }
7779 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7780 {
7781 slots->m_raidconfig[config].m_raidvol[vol].m_state =
7782 (uint8_t)state;
7783
7784 mptsas_log(mpt, CE_NOTE,
7785 "Volume %d is now %s\n", vol,
7786 state == MPI2_RAID_VOL_STATE_OPTIMAL
7787 ? "optimal" :
7788 state == MPI2_RAID_VOL_STATE_DEGRADED
7789 ? "degraded" :
7790 state == MPI2_RAID_VOL_STATE_ONLINE
7791 ? "online" :
7792 state == MPI2_RAID_VOL_STATE_INITIALIZING
7793 ? "initializing" :
7794 state == MPI2_RAID_VOL_STATE_FAILED
7795 ? "failed" :
7796 state == MPI2_RAID_VOL_STATE_MISSING
7797 ? "missing" :
7798 "state unknown");
7799 break;
7800 }
7801 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7802 {
7803 slots->m_raidconfig[config].m_raidvol[vol].
7804 m_statusflags = state;
7805
7806 mptsas_log(mpt, CE_NOTE,
7807 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7808 vol,
7809 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7810 ? ", enabled" : ", disabled",
7811 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7812 ? ", quiesced" : "",
7813 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7814 ? ", inactive" : ", active",
7815 state &
7816 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7817 ? ", bad block table is full" : "",
7818 state &
7819 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7820 ? ", resync in progress" : "",
7821 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7822 ? ", background initialization in progress" : "",
7823 state &
7824 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7825 ? ", capacity expansion in progress" : "",
7826 state &
7827 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7828 ? ", consistency check in progress" : "",
7829 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7830 ? ", data scrub in progress" : "");
7831 break;
7832 }
7833 default:
7834 break;
7835 }
7836 break;
7837 }
7838 case MPI2_EVENT_IR_PHYSICAL_DISK:
7839 {
7840 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7841 uint16_t devhandle, enchandle, slot;
7842 uint32_t status, state;
7843 uint8_t physdisknum, reason;
7844
7845 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7846 eventreply->EventData;
7847 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7848 &irPhysDisk->PhysDiskNum);
7849 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7850 &irPhysDisk->PhysDiskDevHandle);
7851 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7852 &irPhysDisk->EnclosureHandle);
7853 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7854 &irPhysDisk->Slot);
7855 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7856 &irPhysDisk->NewValue);
7857 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7858 &irPhysDisk->ReasonCode);
7859
7860 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7861
7862 switch (reason) {
7863 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7864 mptsas_log(mpt, CE_NOTE,
7865 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7866 "for enclosure with handle 0x%x is now in hot "
7867 "spare pool %d",
7868 physdisknum, devhandle, slot, enchandle,
7869 (state >> 16) & 0xff);
7870 break;
7871
7872 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7873 status = state;
7874 mptsas_log(mpt, CE_NOTE,
7875 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7876 "for enclosure with handle 0x%x is now "
7877 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7878 enchandle,
7879 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7880 ? ", inactive" : ", active",
7881 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7882 ? ", out of sync" : "",
7883 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7884 ? ", quiesced" : "",
7885 status &
7886 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7887 ? ", write cache enabled" : "",
7888 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7889 ? ", capacity expansion target" : "");
7890 break;
7891
7892 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7893 mptsas_log(mpt, CE_NOTE,
7894 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7895 "for enclosure with handle 0x%x is now %s\n",
7896 physdisknum, devhandle, slot, enchandle,
7897 state == MPI2_RAID_PD_STATE_OPTIMAL
7898 ? "optimal" :
7899 state == MPI2_RAID_PD_STATE_REBUILDING
7900 ? "rebuilding" :
7901 state == MPI2_RAID_PD_STATE_DEGRADED
7902 ? "degraded" :
7903 state == MPI2_RAID_PD_STATE_HOT_SPARE
7904 ? "a hot spare" :
7905 state == MPI2_RAID_PD_STATE_ONLINE
7906 ? "online" :
7907 state == MPI2_RAID_PD_STATE_OFFLINE
7908 ? "offline" :
7909 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7910 ? "not compatible" :
7911 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7912 ? "not configured" :
7913 "state unknown");
7914 break;
7915 }
7916 break;
7917 }
7918 default:
7919 NDBG20(("mptsas%d: unknown event %x received",
7920 mpt->m_instance, event));
7921 break;
7922 }
7923
7924 /*
7925 * Return the reply frame to the free queue.
7926 */
7927 ddi_put32(mpt->m_acc_free_queue_hdl,
7928 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7929 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7930 DDI_DMA_SYNC_FORDEV);
7931 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7932 mpt->m_free_index = 0;
7933 }
7934 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7935 mpt->m_free_index);
7936 mutex_exit(&mpt->m_mutex);
7937 }
7938
7939 /*
7940 * invoked from timeout() to restart qfull cmds with throttle == 0
7941 */
7942 static void
7943 mptsas_restart_cmd(void *arg)
7944 {
7945 mptsas_t *mpt = arg;
7946 mptsas_target_t *ptgt = NULL;
7947
7948 mutex_enter(&mpt->m_mutex);
7949
7950 mpt->m_restart_cmd_timeid = 0;
7951
7952 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7953 MPTSAS_HASH_FIRST);
7954 while (ptgt != NULL) {
7955 mutex_enter(&ptgt->m_tgt_intr_mutex);
7956 if (ptgt->m_reset_delay == 0) {
7957 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7958 mptsas_set_throttle(mpt, ptgt,
7959 MAX_THROTTLE);
7960 }
7961 }
7962 mutex_exit(&ptgt->m_tgt_intr_mutex);
7963
7964 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7965 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7966 }
7967 mptsas_restart_hba(mpt);
7968 mutex_exit(&mpt->m_mutex);
7969 }
7970
7971 /*
7972 * mptsas_remove_cmd0 is similar to mptsas_remove_cmd except that it is called
7973 * where m_intr_mutex has already been held.
7974 */
/*
 * mptsas_remove_cmd - remove a command from the active slot table.
 *
 * Caller must hold m_mutex.  Links the command onto its m_active slot
 * (see comment below for why), then performs the actual removal via
 * mptsas_remove_cmd0() under m_intr_mutex.
 */
void
mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * With the new fine-grained lock mechanism, the outstanding cmd is
	 * only linked to m_active before the dma is triggered
	 * (MPTSAS_START_CMD) to send it.  That is, mptsas_save_cmd() doesn't
	 * link the outstanding cmd now.  So when mptsas_remove_cmd is called,
	 * a mptsas_save_cmd must have been called, but the cmd may have not
	 * been linked.
	 * For mptsas_remove_cmd0, the cmd must have been linked.
	 * In order to keep the same semantic, we link the cmd to the
	 * outstanding cmd list.
	 */
	mpt->m_active->m_slot[cmd->cmd_slot] = cmd;

	mutex_enter(&mpt->m_intr_mutex);
	mptsas_remove_cmd0(mpt, cmd);
	mutex_exit(&mpt->m_intr_mutex);
}
7996
/*
 * mptsas_remove_cmd0 - core of command removal; caller holds
 * m_intr_mutex (see mptsas_remove_cmd).
 *
 * Clears the command's m_active slot, returns the slot's free-list
 * element to the per-CPU release queue, decrements the per-target
 * command count, and recomputes the target's tagged-queue timeout.
 * Task Management commands are skipped entirely (handled elsewhere).
 */
static inline void
mptsas_remove_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		slot;
	mptsas_slots_t	*slots = mpt->m_active;
	int		t;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
	mptsas_slot_free_e_t	*pe;

	ASSERT(cmd != NULL);
	ASSERT(cmd->cmd_queued == FALSE);

	/*
	 * Task Management cmds are removed in their own routines.  Also,
	 * we don't want to modify timeout based on TM cmds.
	 */
	if (cmd->cmd_flags & CFLAG_TM_CMD) {
		return;
	}

	t = Tgt(cmd);
	slot = cmd->cmd_slot;
	/* Free-list elements are indexed from 0; SMID/slot from 1. */
	pe = mpt->m_slot_free_ae + slot - 1;
	ASSERT(cmd == slots->m_slot[slot]);
	ASSERT((slot > 0) && slot < (mpt->m_max_requests - 1));

	/*
	 * remove the cmd: clear the slot and push the slot's free element
	 * back onto the release queue of the CPU it is bound to.
	 */
	mutex_enter(&mpt->m_slot_freeq_pairp[pe->cpuid].
	    m_slot_releq.s.m_fq_mutex);
	NDBG31(("mptsas_remove_cmd0: removing cmd=0x%p", (void *)cmd));
	slots->m_slot[slot] = NULL;
	ASSERT(pe->slot == slot);
	list_insert_tail(&mpt->m_slot_freeq_pairp[pe->cpuid].
	    m_slot_releq.s.m_fq_list, pe);
	mpt->m_slot_freeq_pairp[pe->cpuid].m_slot_releq.s.m_fq_n++;
	ASSERT(mpt->m_slot_freeq_pairp[pe->cpuid].
	    m_slot_releq.s.m_fq_n <= mpt->m_max_requests - 2);
	mutex_exit(&mpt->m_slot_freeq_pairp[pe->cpuid].
	    m_slot_releq.s.m_fq_mutex);

	/*
	 * only decrement per target ncmds if command
	 * has a target associated with it.
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		ptgt->m_t_ncmds--;
		/*
		 * reset throttle if we just ran an untagged command
		 * to a tagged target
		 */
		if ((ptgt->m_t_ncmds == 0) &&
		    ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);
	}

	/*
	 * This is all we need to do for ioc commands.
	 * The ioc cmds would never be handled in fastpath in ISR, so we make
	 * sure the mptsas_return_to_pool() would always be called with
	 * m_mutex protected.
	 */
	if (cmd->cmd_flags & CFLAG_CMDIOC) {
		ASSERT(mutex_owned(&mpt->m_mutex));
		mptsas_return_to_pool(mpt, cmd);
		return;
	}

	/*
	 * Figure out what to set tag Q timeout for...
	 *
	 * Optimize: If we have duplicate's of same timeout
	 * we're using, then we'll use it again until we run
	 * out of duplicates.  This should be the normal case
	 * for block and raw I/O.
	 * If no duplicates, we have to scan through tag que and
	 * find the longest timeout value and use it.  This is
	 * going to take a while...
	 * Add 1 to m_n_slots to account for TM request.
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
		if (--(ptgt->m_dups) == 0) {
			if (ptgt->m_t_ncmds) {
				mptsas_cmd_t *ssp;
				uint_t n = 0;
				ushort_t nslots = (slots->m_n_slots + 1);
				ushort_t i;
				/*
				 * This crude check assumes we don't do
				 * this too often which seems reasonable
				 * for block and raw I/O.
				 */
				for (i = 0; i < nslots; i++) {
					ssp = slots->m_slot[i];
					if (ssp && (Tgt(ssp) == t) &&
					    (ssp->cmd_pkt->pkt_time > n)) {
						n = ssp->cmd_pkt->pkt_time;
						ptgt->m_dups = 1;
					} else if (ssp && (Tgt(ssp) == t) &&
					    (ssp->cmd_pkt->pkt_time == n)) {
						ptgt->m_dups++;
					}
				}
				ptgt->m_timebase = n;
			} else {
				ptgt->m_dups = 0;
				ptgt->m_timebase = 0;
			}
		}
	}
	ptgt->m_timeout = ptgt->m_timebase;

	ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
	mutex_exit(&ptgt->m_tgt_intr_mutex);
}
8117
8118 /*
8119 * start a fresh request from the top of the device queue.
8120 */
8121 static void
8122 mptsas_restart_hba(mptsas_t *mpt)
8123 {
8124 mptsas_cmd_t *cmd, *next_cmd;
8125 mptsas_target_t *ptgt = NULL;
8126
8127 NDBG1(("mptsas_restart_hba: mpt=0x%p", (void *)mpt));
8128
8129 ASSERT(mutex_owned(&mpt->m_mutex));
8130
8131 /*
8132 * If there is a reset delay, don't start any cmds. Otherwise, start
8133 * as many cmds as possible.
8134 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
8135 * commands is m_max_requests - 2.
8136 */
8137 cmd = mpt->m_waitq;
8138
8139 while (cmd != NULL) {
8140 next_cmd = cmd->cmd_linkp;
8141 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
8142 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8143 /*
8144 * passthru command get slot need
8145 * set CFLAG_PREPARED.
8146 */
8147 cmd->cmd_flags |= CFLAG_PREPARED;
8148 mptsas_waitq_delete(mpt, cmd);
8149 mptsas_start_passthru(mpt, cmd);
8150 }
8151 cmd = next_cmd;
8152 continue;
8153 }
8154 if (cmd->cmd_flags & CFLAG_CONFIG) {
8155 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8156 /*
8157 * Send the config page request and delete it
8158 * from the waitq.
8159 */
8160 cmd->cmd_flags |= CFLAG_PREPARED;
8161 mptsas_waitq_delete(mpt, cmd);
8162 mptsas_start_config_page_access(mpt, cmd);
8163 }
8164 cmd = next_cmd;
8165 continue;
8166 }
8167 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
8168 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8169 /*
8170 * Send the FW Diag request and delete if from
8171 * the waitq.
8172 */
8173 cmd->cmd_flags |= CFLAG_PREPARED;
8174 mptsas_waitq_delete(mpt, cmd);
8175 mptsas_start_diag(mpt, cmd);
8176 }
8177 cmd = next_cmd;
8178 continue;
8179 }
8180
8181 ptgt = cmd->cmd_tgt_addr;
8182 if (ptgt) {
8183 mutex_enter(&mpt->m_intr_mutex);
8184 mutex_enter(&ptgt->m_tgt_intr_mutex);
8185 if ((ptgt->m_t_throttle == DRAIN_THROTTLE) &&
8186 (ptgt->m_t_ncmds == 0)) {
8187 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8188 }
8189 if ((ptgt->m_reset_delay == 0) &&
8190 (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
8191 mutex_exit(&ptgt->m_tgt_intr_mutex);
8192 mutex_exit(&mpt->m_intr_mutex);
8193 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8194 mptsas_waitq_delete(mpt, cmd);
8195 (void) mptsas_start_cmd(mpt, cmd);
8196 }
8197 goto out;
8198 }
8199 mutex_exit(&ptgt->m_tgt_intr_mutex);
8200 mutex_exit(&mpt->m_intr_mutex);
8201 }
8202 out:
8203 cmd = next_cmd;
8204 }
8205 }
8206
8207 /*
8208 * mpt tag type lookup
8209 */
8210 static char mptsas_tag_lookup[] =
8211 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8212
8213 /*
8214 * mptsas_start_cmd0 is similar to mptsas_start_cmd, except that, it is called
8215 * without ANY mutex protected, while, mptsas_start_cmd is called with m_mutex
8216 * protected.
8217 *
8218 * the relevant field in ptgt should be protected by m_tgt_intr_mutex in both
8219 * functions.
8220 *
8221 * before the cmds are linked on the slot for monitor as outstanding cmds, they
8222 * are accessed as slab objects, so slab framework ensures the exclusive access,
8223 * and no other mutex is requireed. Linking for monitor and the trigger of dma
8224 * must be done exclusively.
8225 */
8226 static int
8227 mptsas_start_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd)
8228 {
8229 struct scsi_pkt *pkt = CMD2PKT(cmd);
8230 uint32_t control = 0;
8231 int n;
8232 caddr_t mem;
8233 pMpi2SCSIIORequest_t io_request;
8234 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
8235 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
8236 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8237 uint16_t SMID, io_flags = 0;
8238 uint32_t request_desc_low, request_desc_high;
8239
8240 NDBG1(("mptsas_start_cmd0: cmd=0x%p", (void *)cmd));
8241
8242 /*
8243 * Set SMID and increment index. Rollover to 1 instead of 0 if index
8244 * is at the max. 0 is an invalid SMID, so we call the first index 1.
8245 */
8246 SMID = cmd->cmd_slot;
8247
8248 /*
8249 * It is possible for back to back device reset to
8250 * happen before the reset delay has expired. That's
8251 * ok, just let the device reset go out on the bus.
8252 */
8253 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8254 ASSERT(ptgt->m_reset_delay == 0);
8255 }
8256
8257 /*
8258 * if a non-tagged cmd is submitted to an active tagged target
8259 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
8260 * to be untagged
8261 */
8262 mutex_enter(&ptgt->m_tgt_intr_mutex);
8263 if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
8264 (ptgt->m_t_ncmds > 1) &&
8265 ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
8266 (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
8267 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8268 NDBG23(("target=%d, untagged cmd, start draining\n",
8269 ptgt->m_devhdl));
8270
8271 if (ptgt->m_reset_delay == 0) {
8272 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
8273 }
8274 mutex_exit(&ptgt->m_tgt_intr_mutex);
8275
8276 mutex_enter(&mpt->m_mutex);
8277 mptsas_remove_cmd(mpt, cmd);
8278 cmd->cmd_pkt_flags |= FLAG_HEAD;
8279 mptsas_waitq_add(mpt, cmd);
8280 mutex_exit(&mpt->m_mutex);
8281 return (DDI_FAILURE);
8282 }
8283 mutex_exit(&ptgt->m_tgt_intr_mutex);
8284 return (DDI_FAILURE);
8285 }
8286 mutex_exit(&ptgt->m_tgt_intr_mutex);
8287
8288 /*
8289 * Set correct tag bits.
8290 */
8291 if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
8292 switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
8293 FLAG_TAGMASK) >> 12)]) {
8294 case MSG_SIMPLE_QTAG:
8295 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8296 break;
8297 case MSG_HEAD_QTAG:
8298 control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
8299 break;
8300 case MSG_ORDERED_QTAG:
8301 control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
8302 break;
8303 default:
8304 mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
8305 break;
8306 }
8307 } else {
8308 if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
8309 ptgt->m_t_throttle = 1;
8310 }
8311 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8312 }
8313
8314 if (cmd->cmd_pkt_flags & FLAG_TLR) {
8315 control |= MPI2_SCSIIO_CONTROL_TLR_ON;
8316 }
8317
8318 mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
8319 io_request = (pMpi2SCSIIORequest_t)mem;
8320
8321 bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
8322 ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
8323 (MPI2_SCSI_IO_REQUEST, SGL) / 4);
8324 mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
8325 MPI2_FUNCTION_SCSI_IO_REQUEST);
8326
8327 (void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
8328 io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);
8329
8330 io_flags = cmd->cmd_cdblen;
8331 ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
8332 /*
8333 * setup the Scatter/Gather DMA list for this request
8334 */
8335 if (cmd->cmd_cookiec > 0) {
8336 mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
8337 } else {
8338 ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
8339 ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
8340 MPI2_SGE_FLAGS_END_OF_BUFFER |
8341 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
8342 MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
8343 }
8344
8345 /*
8346 * save ARQ information
8347 */
8348 ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
8349 if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
8350 (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
8351 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8352 cmd->cmd_ext_arqcookie.dmac_address);
8353 } else {
8354 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8355 cmd->cmd_arqcookie.dmac_address);
8356 }
8357
8358 ddi_put32(acc_hdl, &io_request->Control, control);
8359
8360 NDBG31(("starting message=0x%p, with cmd=0x%p",
8361 (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));
8362
8363 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8364
8365 /*
8366 * Build request descriptor and write it to the request desc post reg.
8367 */
8368 request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
8369 request_desc_high = ptgt->m_devhdl << 16;
8370
8371 mutex_enter(&mpt->m_mutex);
8372 mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
8373 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
8374 mutex_exit(&mpt->m_mutex);
8375
8376 /*
8377 * Start timeout.
8378 */
8379 mutex_enter(&ptgt->m_tgt_intr_mutex);
8380 #ifdef MPTSAS_TEST
8381 /*
8382 * Temporarily set timebase = 0; needed for
8383 * timeout torture test.
8384 */
8385 if (mptsas_test_timeouts) {
8386 ptgt->m_timebase = 0;
8387 }
8388 #endif
8389 n = pkt->pkt_time - ptgt->m_timebase;
8390
8391 if (n == 0) {
8392 (ptgt->m_dups)++;
8393 ptgt->m_timeout = ptgt->m_timebase;
8394 } else if (n > 0) {
8395 ptgt->m_timeout =
8396 ptgt->m_timebase = pkt->pkt_time;
8397 ptgt->m_dups = 1;
8398 } else if (n < 0) {
8399 ptgt->m_timeout = ptgt->m_timebase;
8400 }
8401 #ifdef MPTSAS_TEST
8402 /*
8403 * Set back to a number higher than
8404 * mptsas_scsi_watchdog_tick
8405 * so timeouts will happen in mptsas_watchsubr
8406 */
8407 if (mptsas_test_timeouts) {
8408 ptgt->m_timebase = 60;
8409 }
8410 #endif
8411 mutex_exit(&ptgt->m_tgt_intr_mutex);
8412
8413 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8414 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8415 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8416 return (DDI_FAILURE);
8417 }
8418 return (DDI_SUCCESS);
8419 }
8420
/*
 * Build a SCSI IO request frame for 'cmd' and post it to the IOC.
 *
 * The command must already own a slot; cmd->cmd_slot doubles as the SMID.
 * Returns DDI_FAILURE if the command was requeued to drain untagged I/O
 * or an FMA handle check failed afterwards, DDI_SUCCESS otherwise.
 *
 * NOTE(review): unlike mptsas_start_cmd0(), the slot assignment and
 * MPTSAS_START_CMD here are not wrapped in m_mutex, so this variant is
 * presumably invoked with m_mutex already held -- confirm against callers.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	int			n;
	caddr_t			mem;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint32_t		request_desc_low, request_desc_high;

	NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));

	/*
	 * Set SMID and increment index.  Rollover to 1 instead of 0 if index
	 * is at the max.  0 is an invalid SMID, so we call the first index 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			mutex_exit(&ptgt->m_tgt_intr_mutex);

			/* give the slot back and requeue at the head */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
			return (DDI_FAILURE);
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&ptgt->m_tgt_intr_mutex);

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		/* throttle untagged (non-RQSENSE) commands to one at a time */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	/* locate this SMID's request frame and fill it in */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;

	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	/* IoFlags carries the CDB length */
	io_flags = cmd->cmd_cdblen;
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* no data transfer: emit a single zero-length SGE */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_ext_arqcookie.dmac_address);
	} else {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_arqcookie.dmac_address);
	}

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=0x%p, with cmd=0x%p",
	    (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));

	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	request_desc_high = ptgt->m_devhdl << 16;

	mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);

	/*
	 * Start timeout.
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
#ifdef MPTSAS_TEST
	/*
	 * Temporarily set timebase = 0;  needed for
	 * timeout torture test.
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 0;
	}
#endif
	/* track the largest pkt_time seen on this target */
	n = pkt->pkt_time - ptgt->m_timebase;

	if (n == 0) {
		(ptgt->m_dups)++;
		ptgt->m_timeout = ptgt->m_timebase;
	} else if (n > 0) {
		ptgt->m_timeout =
		    ptgt->m_timebase = pkt->pkt_time;
		ptgt->m_dups = 1;
	} else if (n < 0) {
		ptgt->m_timeout = ptgt->m_timebase;
	}
#ifdef MPTSAS_TEST
	/*
	 * Set back to a number higher than
	 * mptsas_scsi_watchdog_tick
	 * so timeouts will happen in mptsas_watchsubr
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 60;
	}
#endif
	mutex_exit(&ptgt->m_tgt_intr_mutex);

	/* FMA: verify the handles used to build/post the request */
	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
8611
8612 /*
8613 * Select a helper thread to handle current doneq
8614 */
8615 static void
8616 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8617 {
8618 uint64_t t, i;
8619 uint32_t min = 0xffffffff;
8620 mptsas_doneq_thread_list_t *item;
8621
8622 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8623 item = &mpt->m_doneq_thread_id[i];
8624 /*
8625 * If the completed command on help thread[i] less than
8626 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8627 * pick a thread which has least completed command.
8628 */
8629
8630 mutex_enter(&item->mutex);
8631 if (item->len < mpt->m_doneq_thread_threshold) {
8632 t = i;
8633 mutex_exit(&item->mutex);
8634 break;
8635 }
8636 if (item->len < min) {
8637 min = item->len;
8638 t = i;
8639 }
8640 mutex_exit(&item->mutex);
8641 }
8642 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8643 mptsas_doneq_mv(mpt, t);
8644 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8645 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8646 }
8647
8648 /*
8649 * move the current global doneq to the doneq of thread[t]
8650 */
8651 static void
8652 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8653 {
8654 mptsas_cmd_t *cmd;
8655 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8656
8657 ASSERT(mutex_owned(&item->mutex));
8658 mutex_enter(&mpt->m_intr_mutex);
8659 while ((cmd = mpt->m_doneq) != NULL) {
8660 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8661 mpt->m_donetail = &mpt->m_doneq;
8662 }
8663 cmd->cmd_linkp = NULL;
8664 *item->donetail = cmd;
8665 item->donetail = &cmd->cmd_linkp;
8666 mpt->m_doneq_len--;
8667 item->len++;
8668 }
8669 mutex_exit(&mpt->m_intr_mutex);
8670 }
8671
/*
 * Post-completion FMA check for 'cmd'.
 *
 * Verifies every access and DMA handle the command may have used (the
 * controller-wide handles plus the command's own data/ARQ/extra-frame
 * handles).  On any failure the DDI FM service impact is reported and
 * the packet is failed with CMD_TRAN_ERR (statistics cleared) so the
 * caller completes it as a transport error.
 */
void
mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	/* Check all acc and dma handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		/* only the config-space handle gets its error cleared here */
		ddi_fm_acc_err_clear(mpt->m_config_handle,
		    DDI_FME_VER0);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* per-command handles may be absent; check only when present */
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if (cmd->cmd_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if (cmd->cmd_ext_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
}
8742
8743 /*
8744 * mptsas_doneq_add0 is similar to mptsas_doneq_add except that it is called
8745 * where m_intr_mutex has already been held.
8746 */
8747 static inline void
8748 mptsas_doneq_add0(mptsas_t *mpt, mptsas_cmd_t *cmd)
8749 {
8750 struct scsi_pkt *pkt = CMD2PKT(cmd);
8751
8752 NDBG31(("mptsas_doneq_add0: cmd=0x%p", (void *)cmd));
8753
8754 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8755 cmd->cmd_linkp = NULL;
8756 cmd->cmd_flags |= CFLAG_FINISHED;
8757 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8758
8759 /*
8760 * only add scsi pkts that have completion routines to
8761 * the doneq. no intr cmds do not have callbacks.
8762 */
8763 if (pkt && (pkt->pkt_comp)) {
8764 *mpt->m_donetail = cmd;
8765 mpt->m_donetail = &cmd->cmd_linkp;
8766 mpt->m_doneq_len++;
8767 }
8768 }
8769
8770 /*
8771 * These routines manipulate the queue of commands that
8772 * are waiting for their completion routines to be called.
8773 * The queue is usually in FIFO order but on an MP system
8774 * it's possible for the completion routines to get out
8775 * of order. If that's a problem you need to add a global
8776 * mutex around the code that calls the completion routine
8777 * in the interrupt handler.
8778 */
8779 static void
8780 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8781 {
8782 ASSERT(mutex_owned(&mpt->m_mutex));
8783
8784 mptsas_fma_check(mpt, cmd);
8785
8786 mutex_enter(&mpt->m_intr_mutex);
8787 mptsas_doneq_add0(mpt, cmd);
8788 mutex_exit(&mpt->m_intr_mutex);
8789 }
8790
8791 static mptsas_cmd_t *
8792 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8793 {
8794 mptsas_cmd_t *cmd;
8795 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8796
8797 /* pop one off the done queue */
8798 if ((cmd = item->doneq) != NULL) {
8799 /* if the queue is now empty fix the tail pointer */
8800 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8801 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8802 item->donetail = &item->doneq;
8803 }
8804 cmd->cmd_linkp = NULL;
8805 item->len--;
8806 }
8807 return (cmd);
8808 }
8809
/*
 * Drain the global doneq, invoking each completed command's completion
 * routine with no driver mutexes held.
 *
 * m_in_callback guards against recursive draining.  The queue is detached
 * under m_intr_mutex, then (unless we are on an interrupt thread, which
 * enters without m_mutex) m_mutex is dropped around the callbacks since
 * pkt_comp handlers may re-enter the driver.
 */
static void
mptsas_doneq_empty(mptsas_t *mpt)
{
	mutex_enter(&mpt->m_intr_mutex);
	if (mpt->m_doneq && !mpt->m_in_callback) {
		mptsas_cmd_t	*cmd, *next;
		struct scsi_pkt *pkt;

		mpt->m_in_callback = 1;
		/* detach the whole queue so callbacks run lock-free */
		cmd = mpt->m_doneq;
		mpt->m_doneq = NULL;
		mpt->m_donetail = &mpt->m_doneq;
		mpt->m_doneq_len = 0;

		mutex_exit(&mpt->m_intr_mutex);

		/*
		 * ONLY in ISR, is it called without m_mutex held, otherwise,
		 * it is always called with m_mutex held.
		 */
		if ((curthread->t_flag & T_INTR_THREAD) == 0)
			mutex_exit(&mpt->m_mutex);
		/*
		 * run the completion routines of all the
		 * completed commands
		 */
		while (cmd != NULL) {
			next = cmd->cmd_linkp;
			cmd->cmd_linkp = NULL;
			/* run this command's completion routine */
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
			mptsas_pkt_comp(pkt, cmd);
			cmd = next;
		}
		if ((curthread->t_flag & T_INTR_THREAD) == 0)
			mutex_enter(&mpt->m_mutex);
		mpt->m_in_callback = 0;
		return;
	}
	mutex_exit(&mpt->m_intr_mutex);
}
8852
8853 /*
8854 * These routines manipulate the target's queue of pending requests
8855 */
8856 void
8857 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8858 {
8859 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8860 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8861 cmd->cmd_queued = TRUE;
8862 if (ptgt)
8863 ptgt->m_t_nwait++;
8864 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8865 mutex_enter(&mpt->m_intr_mutex);
8866 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8867 mpt->m_waitqtail = &cmd->cmd_linkp;
8868 }
8869 mpt->m_waitq = cmd;
8870 mutex_exit(&mpt->m_intr_mutex);
8871 } else {
8872 cmd->cmd_linkp = NULL;
8873 *(mpt->m_waitqtail) = cmd;
8874 mpt->m_waitqtail = &cmd->cmd_linkp;
8875 }
8876 }
8877
/*
 * Remove and return the command at the head of the wait queue, or NULL
 * if the queue is empty.  The target's pending-request count is
 * decremented for the removed command.
 */
static mptsas_cmd_t *
mptsas_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t	*cmd;
	mptsas_target_t *ptgt;
	NDBG7(("mptsas_waitq_rm"));

	/* MPTSAS_WAITQ_RM unlinks the head (if any) into cmd */
	mutex_enter(&mpt->m_intr_mutex);
	MPTSAS_WAITQ_RM(mpt, cmd);
	mutex_exit(&mpt->m_intr_mutex);

	NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
	if (cmd) {
		ptgt = cmd->cmd_tgt_addr;
		if (ptgt) {
			ptgt->m_t_nwait--;
			ASSERT(ptgt->m_t_nwait >= 0);
		}
	}
	return (cmd);
}
8899
8900 /*
8901 * remove specified cmd from the middle of the wait queue.
8902 */
8903 static void
8904 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8905 {
8906 mptsas_cmd_t *prevp = mpt->m_waitq;
8907 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8908
8909 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8910 (void *)mpt, (void *)cmd));
8911 if (ptgt) {
8912 ptgt->m_t_nwait--;
8913 ASSERT(ptgt->m_t_nwait >= 0);
8914 }
8915
8916 if (prevp == cmd) {
8917 mutex_enter(&mpt->m_intr_mutex);
8918 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8919 mpt->m_waitqtail = &mpt->m_waitq;
8920 mutex_exit(&mpt->m_intr_mutex);
8921
8922 cmd->cmd_linkp = NULL;
8923 cmd->cmd_queued = FALSE;
8924 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8925 (void *)mpt, (void *)cmd));
8926 return;
8927 }
8928
8929 while (prevp != NULL) {
8930 if (prevp->cmd_linkp == cmd) {
8931 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8932 mpt->m_waitqtail = &prevp->cmd_linkp;
8933
8934 cmd->cmd_linkp = NULL;
8935 cmd->cmd_queued = FALSE;
8936 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8937 (void *)mpt, (void *)cmd));
8938 return;
8939 }
8940 prevp = prevp->cmd_linkp;
8941 }
8942 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8943 }
8944
8945 /*
8946 * device and bus reset handling
8947 *
8948 * Notes:
8949 * - RESET_ALL: reset the controller
8950 * - RESET_TARGET: reset the target specified in scsi_address
8951 */
/*
 * tran_reset(9E) entry point.
 *
 * Resets the target addressed by 'ap' (the 'level' argument is logged
 * but the reset issued is always a target reset via
 * mptsas_do_scsi_reset()).  Returns TRUE on success, FALSE on failure
 * or if no target private data exists.
 */
static int
mptsas_scsi_reset(struct scsi_address *ap, int level)
{
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	mptsas_target_t		*ptgt = NULL;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
	ptgt = tgt_private->t_private;
	if (ptgt == NULL) {
		return (FALSE);
	}
	NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
	    level));

	mutex_enter(&mpt->m_mutex);
	/*
	 * if we are not in panic set up a reset delay for this target
	 */
	if (!ddi_in_panic()) {
		mptsas_setup_bus_reset_delay(mpt);
	} else {
		/* in panic, timeouts are unavailable: just busy-wait */
		drv_usecwait(mpt->m_scsi_reset_delay * 1000);
	}
	rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
	mutex_exit(&mpt->m_mutex);

	/*
	 * The transport layer expect to only see TRUE and
	 * FALSE. Therefore, we will adjust the return value
	 * if mptsas_do_scsi_reset returns FAILED.
	 */
	if (rval == FAILED)
		rval = FALSE;
	return (rval);
}
8989
/*
 * Issue a target reset task management request to 'devhdl'.
 *
 * Targets that are physical disks inside a RAID volume are skipped
 * (the reset claims success without being sent), since resetting a
 * volume member would disturb the volume.  Caller must hold m_mutex.
 * Returns TRUE/FALSE (or FAILED from the task management call, which
 * the caller normalizes).
 */
static int
mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
{
	int		rval = FALSE;
	uint8_t		config, disk;
	mptsas_slots_t	*slots = mpt->m_active;

	ASSERT(mutex_owned(&mpt->m_mutex));

	if (mptsas_debug_resets) {
		mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
		    devhdl);
	}

	/*
	 * Issue a Target Reset message to the target specified but not to a
	 * disk making up a raid volume.  Just look through the RAID config
	 * Phys Disk list of DevHandles.  If the target's DevHandle is in this
	 * list, then don't reset this target.
	 */
	for (config = 0; config < slots->m_num_raid_configs; config++) {
		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
			if (devhdl == slots->m_raidconfig[config].
			    m_physdisk_devhdl[disk]) {
				return (TRUE);
			}
		}
	}

	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);

	/* complete any commands flushed back by the reset */
	mptsas_doneq_empty(mpt);
	return (rval);
}
9025
9026 static int
9027 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
9028 void (*callback)(caddr_t), caddr_t arg)
9029 {
9030 mptsas_t *mpt = ADDR2MPT(ap);
9031
9032 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
9033
9034 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
9035 &mpt->m_mutex, &mpt->m_reset_notify_listf));
9036 }
9037
9038 static int
9039 mptsas_get_name(struct scsi_device *sd, char *name, int len)
9040 {
9041 dev_info_t *lun_dip = NULL;
9042
9043 ASSERT(sd != NULL);
9044 ASSERT(name != NULL);
9045 lun_dip = sd->sd_dev;
9046 ASSERT(lun_dip != NULL);
9047
9048 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
9049 return (1);
9050 } else {
9051 return (0);
9052 }
9053 }
9054
/* Bus-address lookup is identical to name lookup for this HBA. */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
9060
9061 void
9062 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
9063 {
9064
9065 NDBG25(("mptsas_set_throttle: throttle=%x", what));
9066
9067 /*
9068 * if the bus is draining/quiesced, no changes to the throttles
9069 * are allowed. Not allowing change of throttles during draining
9070 * limits error recovery but will reduce draining time
9071 *
9072 * all throttles should have been set to HOLD_THROTTLE
9073 */
9074 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
9075 return;
9076 }
9077
9078 if (what == HOLD_THROTTLE) {
9079 ptgt->m_t_throttle = HOLD_THROTTLE;
9080 } else if (ptgt->m_reset_delay == 0) {
9081 ptgt->m_t_throttle = what;
9082 }
9083 }
9084
9085 /*
9086 * Clean up from a device reset.
9087 * For the case of target reset, this function clears the waitq of all
9088 * commands for a particular target. For the case of abort task set, this
9089 * function clears the waitq of all commonds for a particular target/lun.
9090 */
/*
 * Flush all commands for a target (target reset) or target/lun (abort
 * task set / lun reset): first the active slots, then the wait queue.
 * Flushed commands are completed with CMD_RESET/STAT_DEV_RESET, except
 * for abort-task-set which uses CMD_ABORTED/STAT_ABORTED.
 */
static void
mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd, *next_cmd;
	int		slot;
	uchar_t		reason;
	uint_t		stat;

	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));

	/*
	 * Make sure the I/O Controller has flushed all cmds
	 * that are associated with this target for a target reset
	 * and target/lun for abort task set.
	 * Account for TM requests, which use the last SMID.
	 */
	mutex_enter(&mpt->m_intr_mutex);
	/* note: "<=" deliberately includes the TM slot at m_n_slots */
	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL) {
			continue;
		}
		reason = CMD_RESET;
		stat = STAT_DEV_RESET;
		switch (tasktype) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			if (Tgt(cmd) == target) {
				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd0(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
				mptsas_doneq_add0(mpt, cmd);
			}
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
			reason = CMD_ABORTED;
			stat = STAT_ABORTED;
			/*FALLTHROUGH*/
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd0(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason,
				    stat);
				mptsas_doneq_add0(mpt, cmd);
			}
			break;
		default:
			break;
		}
	}
	mutex_exit(&mpt->m_intr_mutex);

	/*
	 * Flush the waitq of this target's cmds
	 */
	cmd = mpt->m_waitq;

	reason = CMD_RESET;
	stat = STAT_DEV_RESET;

	switch (tasktype) {
	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		while (cmd != NULL) {
			/* save the link before the delete clears it */
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		reason = CMD_ABORTED;
		stat = STAT_ABORTED;
		/*FALLTHROUGH*/
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		break;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
		    tasktype);
		break;
	}
}
9193
9194 /*
9195 * Clean up hba state, abort all outstanding command and commands in waitq
9196 * reset timeout of all targets.
9197 */
static void
mptsas_flush_hba(mptsas_t *mpt)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd;
	int		slot;

	NDBG25(("mptsas_flush_hba"));

	/*
	 * The I/O Controller should have already sent back
	 * all commands via the scsi I/O reply frame.  Make
	 * sure all commands have been flushed.
	 * Account for TM request, which use the last SMID.
	 */
	mutex_enter(&mpt->m_intr_mutex);
	/* "<=" deliberately includes the TM slot at m_n_slots */
	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL) {
			continue;
		}

		if (cmd->cmd_flags & CFLAG_CMDIOC) {
			/*
			 * Need to make sure to tell everyone that might be
			 * waiting on this command that it's going to fail. If
			 * we get here, this command will never timeout because
			 * the active command table is going to be re-allocated,
			 * so there will be nothing to check against a time out.
			 * Instead, mark the command as failed due to reset.
			 */
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				/* wake the threads blocked on these cmds */
				cmd->cmd_flags |= CFLAG_FINISHED;
				cv_broadcast(&mpt->m_passthru_cv);
				cv_broadcast(&mpt->m_config_cv);
				cv_broadcast(&mpt->m_fw_diag_cv);
			}
			continue;
		}

		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
		    slot));
		mptsas_dump_cmd(mpt, cmd);

		mptsas_remove_cmd0(mpt, cmd);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add0(mpt, cmd);
	}
	mutex_exit(&mpt->m_intr_mutex);

	/*
	 * Flush the waitq.
	 */
	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
		} else {
			mptsas_doneq_add(mpt, cmd);
		}
	}
}
9268
9269 /*
9270 * set pkt_reason and OR in pkt_statistics flag
9271 */
9272 static void
9273 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9274 uint_t stat)
9275 {
9276 #ifndef __lock_lint
9277 _NOTE(ARGUNUSED(mpt))
9278 #endif
9279
9280 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9281 (void *)cmd, reason, stat));
9282
9283 if (cmd) {
9284 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9285 cmd->cmd_pkt->pkt_reason = reason;
9286 }
9287 cmd->cmd_pkt->pkt_statistics |= stat;
9288 }
9289 }
9290
/*
 * Arm the global reset-delay watchdog timeout if it is not already
 * running and timeouts are enabled.  The timeout fires
 * mptsas_watch_reset_delay() after MPTSAS_WATCH_RESET_DELAY_TICK ms.
 */
static void
mptsas_start_watch_reset_delay()
{
	NDBG22(("mptsas_start_watch_reset_delay"));

	mutex_enter(&mptsas_global_mutex);
	/*
	 * NOTE(review): compared against NULL here but cleared with 0 in
	 * mptsas_watch_reset_delay() -- equivalent for a timeout id, but
	 * inconsistent style.
	 */
	if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
		mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
		    drv_usectohz((clock_t)
		    MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
		ASSERT(mptsas_reset_watch != NULL);
	}
	mutex_exit(&mptsas_global_mutex);
}
9305
/*
 * Put every target on this instance into reset delay: hold all
 * throttles and set each target's m_reset_delay, then make sure the
 * global reset-delay watchdog is running to count the delays down.
 */
static void
mptsas_setup_bus_reset_delay(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG22(("mptsas_setup_bus_reset_delay"));
	/* iterate over all targets in the instance's target hash table */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
		ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
		mutex_exit(&ptgt->m_tgt_intr_mutex);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	mptsas_start_watch_reset_delay();
}
9326
9327 /*
9328 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9329 * mpt instance for active reset delays
9330 */
static void
mptsas_watch_reset_delay(void *arg)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(arg))
#endif

	mptsas_t	*mpt;
	int		not_done = 0;

	NDBG22(("mptsas_watch_reset_delay"));

	/* clear the timeout id so the watchdog can be re-armed */
	mutex_enter(&mptsas_global_mutex);
	mptsas_reset_watch = 0;
	mutex_exit(&mptsas_global_mutex);
	rw_enter(&mptsas_global_rwlock, RW_READER);
	for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
		/* skip instances not yet (or no longer) attached */
		if (mpt->m_tran == 0) {
			continue;
		}
		mutex_enter(&mpt->m_mutex);
		not_done += mptsas_watch_reset_delay_subr(mpt);
		mutex_exit(&mpt->m_mutex);
	}
	rw_exit(&mptsas_global_rwlock);

	/* some target still counting down: re-arm the watchdog */
	if (not_done) {
		mptsas_start_watch_reset_delay();
	}
}
9361
/*
 * Per-instance reset-delay tick handler: decrement each target's reset
 * delay by one watchdog tick; targets whose delay expires get their
 * throttle restored and the HBA restarted.  Returns non-zero (-1) when
 * at least one target is still counting down, 0 when all are done.
 * Caller holds m_mutex.
 */
static int
mptsas_watch_reset_delay_subr(mptsas_t *mpt)
{
	int		done = 0;
	int		restart = 0;
	mptsas_target_t	*ptgt = NULL;

	NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		if (ptgt->m_reset_delay != 0) {
			ptgt->m_reset_delay -=
			    MPTSAS_WATCH_RESET_DELAY_TICK;
			if (ptgt->m_reset_delay <= 0) {
				/* delay expired: resume normal throttling */
				ptgt->m_reset_delay = 0;
				mptsas_set_throttle(mpt, ptgt,
				    MAX_THROTTLE);
				restart++;
			} else {
				done = -1;
			}
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* kick the HBA so held-off commands get issued again */
	if (restart > 0) {
		mptsas_restart_hba(mpt);
	}
	return (done);
}
9400
#ifdef MPTSAS_TEST
/*
 * Debug hook: when the global mptsas_rtest matches this target, issue a
 * SCSI reset to it.  mptsas_rtest is cleared on a successful reset so the
 * test fires only once per arming.
 *
 * Fix: removed the unused local `ptgt`, which was declared but never
 * referenced.
 */
static void
mptsas_test_reset(mptsas_t *mpt, int target)
{
	if (mptsas_rtest == target) {
		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
			mptsas_rtest = -1;
		}
		if (mptsas_rtest == -1) {
			NDBG22(("mptsas_test_reset success"));
		}
	}
}
#endif
9417
9418 /*
9419 * abort handling:
9420 *
9421 * Notes:
9422 * - if pkt is not NULL, abort just that command
9423 * - if pkt is NULL, abort all outstanding commands for target
9424 */
9425 static int
9426 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9427 {
9428 mptsas_t *mpt = ADDR2MPT(ap);
9429 int rval;
9430 mptsas_tgt_private_t *tgt_private;
9431 int target, lun;
9432
9433 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9434 tran_tgt_private;
9435 ASSERT(tgt_private != NULL);
9436 target = tgt_private->t_private->m_devhdl;
9437 lun = tgt_private->t_lun;
9438
9439 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9440
9441 mutex_enter(&mpt->m_mutex);
9442 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9443 mutex_exit(&mpt->m_mutex);
9444 return (rval);
9445 }
9446
/*
 * Abort a single packet (pkt != NULL) or the whole task set for
 * target/lun (pkt == NULL).  Returns TRUE on success, FALSE on failure.
 * Caller must hold mpt->m_mutex.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		/*
		 * A command still on the wait queue never reached the
		 * hardware; complete it locally as aborted.
		 */
		if (sp->cmd_queued) {
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */
		/*
		 * m_intr_mutex guards the slot array check; it is dropped
		 * before issuing the (blocking) task management request.
		 */
		mutex_enter(&mpt->m_intr_mutex);
		if (slots->m_slot[sp->cmd_slot] != NULL) {
			mutex_exit(&mpt->m_intr_mutex);
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
		mutex_exit(&mpt->m_intr_mutex);
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Drain any commands completed above to their callbacks. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
9528
9529 /*
9530 * capability handling:
9531 * (*tran_getcap). Get the capability named, and return its value.
9532 */
9533 static int
9534 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9535 {
9536 mptsas_t *mpt = ADDR2MPT(ap);
9537 int ckey;
9538 int rval = FALSE;
9539
9540 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9541 ap->a_target, cap, tgtonly));
9542
9543 mutex_enter(&mpt->m_mutex);
9544
9545 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9546 mutex_exit(&mpt->m_mutex);
9547 return (UNDEFINED);
9548 }
9549
9550 switch (ckey) {
9551 case SCSI_CAP_DMA_MAX:
9552 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9553 break;
9554 case SCSI_CAP_ARQ:
9555 rval = TRUE;
9556 break;
9557 case SCSI_CAP_MSG_OUT:
9558 case SCSI_CAP_PARITY:
9559 case SCSI_CAP_UNTAGGED_QING:
9560 rval = TRUE;
9561 break;
9562 case SCSI_CAP_TAGGED_QING:
9563 rval = TRUE;
9564 break;
9565 case SCSI_CAP_RESET_NOTIFICATION:
9566 rval = TRUE;
9567 break;
9568 case SCSI_CAP_LINKED_CMDS:
9569 rval = FALSE;
9570 break;
9571 case SCSI_CAP_QFULL_RETRIES:
9572 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9573 tran_tgt_private))->t_private->m_qfull_retries;
9574 break;
9575 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9576 rval = drv_hztousec(((mptsas_tgt_private_t *)
9577 (ap->a_hba_tran->tran_tgt_private))->
9578 t_private->m_qfull_retry_interval) / 1000;
9579 break;
9580 case SCSI_CAP_CDB_LEN:
9581 rval = CDB_GROUP4;
9582 break;
9583 case SCSI_CAP_INTERCONNECT_TYPE:
9584 rval = INTERCONNECT_SAS;
9585 break;
9586 case SCSI_CAP_TRAN_LAYER_RETRIES:
9587 if (mpt->m_ioc_capabilities &
9588 MPI2_IOCFACTS_CAPABILITY_TLR)
9589 rval = TRUE;
9590 else
9591 rval = FALSE;
9592 break;
9593 default:
9594 rval = UNDEFINED;
9595 break;
9596 }
9597
9598 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9599
9600 mutex_exit(&mpt->m_mutex);
9601 return (rval);
9602 }
9603
9604 /*
9605 * (*tran_setcap). Set the capability named to the value given.
9606 */
9607 static int
9608 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9609 {
9610 mptsas_t *mpt = ADDR2MPT(ap);
9611 int ckey;
9612 int rval = FALSE;
9613 mptsas_target_t *ptgt;
9614
9615 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9616 ap->a_target, cap, value, tgtonly));
9617
9618 if (!tgtonly) {
9619 return (rval);
9620 }
9621
9622 mutex_enter(&mpt->m_mutex);
9623
9624 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9625 mutex_exit(&mpt->m_mutex);
9626 return (UNDEFINED);
9627 }
9628
9629 switch (ckey) {
9630 case SCSI_CAP_DMA_MAX:
9631 case SCSI_CAP_MSG_OUT:
9632 case SCSI_CAP_PARITY:
9633 case SCSI_CAP_INITIATOR_ID:
9634 case SCSI_CAP_LINKED_CMDS:
9635 case SCSI_CAP_UNTAGGED_QING:
9636 case SCSI_CAP_RESET_NOTIFICATION:
9637 /*
9638 * None of these are settable via
9639 * the capability interface.
9640 */
9641 break;
9642 case SCSI_CAP_ARQ:
9643 /*
9644 * We cannot turn off arq so return false if asked to
9645 */
9646 if (value) {
9647 rval = TRUE;
9648 } else {
9649 rval = FALSE;
9650 }
9651 break;
9652 case SCSI_CAP_TAGGED_QING:
9653 ptgt = ((mptsas_tgt_private_t *)
9654 (ap->a_hba_tran->tran_tgt_private))->t_private;
9655 mutex_enter(&ptgt->m_tgt_intr_mutex);
9656 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9657 mutex_exit(&ptgt->m_tgt_intr_mutex);
9658 rval = TRUE;
9659 break;
9660 case SCSI_CAP_QFULL_RETRIES:
9661 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9662 t_private->m_qfull_retries = (uchar_t)value;
9663 rval = TRUE;
9664 break;
9665 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9666 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9667 t_private->m_qfull_retry_interval =
9668 drv_usectohz(value * 1000);
9669 rval = TRUE;
9670 break;
9671 default:
9672 rval = UNDEFINED;
9673 break;
9674 }
9675 mutex_exit(&mpt->m_mutex);
9676 return (rval);
9677 }
9678
9679 /*
9680 * Utility routine for mptsas_ifsetcap/ifgetcap
9681 */
9682 /*ARGSUSED*/
9683 static int
9684 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9685 {
9686 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9687
9688 if (!cap)
9689 return (FALSE);
9690
9691 *cidxp = scsi_hba_lookup_capstr(cap);
9692 return (TRUE);
9693 }
9694
/*
 * (Re)allocate the active-slot bookkeeping for this instance.  Fails (-1)
 * when commands are outstanding, since the slot array cannot be replaced
 * while in use.  On success, installs the new array in mpt->m_active
 * (carrying over target/SMP tables and RAID configs from the old one) and
 * rebuilds the per-CPU slot free-queue pairs plus the interrupt-coalescing
 * reply staging array.  Returns 0 on success.
 */
static int
mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
{
	mptsas_slots_t	*old_active = mpt->m_active;
	mptsas_slots_t	*new_active;
	size_t		size;
	int		rval = -1, nslot, i;
	mptsas_slot_free_e_t	*pe;

	/* Resizing is only legal when no commands are in flight. */
	if (mptsas_outstanding_cmds_n(mpt)) {
		NDBG9(("cannot change size of active slots array"));
		return (rval);
	}

	size = MPTSAS_SLOTS_SIZE(mpt);
	new_active = kmem_zalloc(size, flag);
	if (new_active == NULL) {
		NDBG1(("new active alloc failed"));
		return (rval);
	}
	/*
	 * Since SMID 0 is reserved and the TM slot is reserved, the
	 * number of slots that can be used at any one time is
	 * m_max_requests - 2.
	 */
	new_active->m_n_slots = nslot = (mpt->m_max_requests - 2);
	new_active->m_size = size;
	new_active->m_tags = 1;

	/* Preserve device tables and RAID config across the replacement. */
	if (old_active) {
		new_active->m_tgttbl = old_active->m_tgttbl;
		new_active->m_smptbl = old_active->m_smptbl;
		new_active->m_num_raid_configs =
		    old_active->m_num_raid_configs;
		for (i = 0; i < new_active->m_num_raid_configs; i++) {
			new_active->m_raidconfig[i] =
			    old_active->m_raidconfig[i];
		}
		mptsas_free_active_slots(mpt);
	}

	/*
	 * One alloc/release free-queue pair per CPU; round max_ncpus up to
	 * the next power of two so slot-to-queue mapping stays uniform.
	 */
	if (max_ncpus & (max_ncpus - 1)) {
		mpt->m_slot_freeq_pair_n = (1 << highbit(max_ncpus));
	} else {
		mpt->m_slot_freeq_pair_n = max_ncpus;
	}
	mpt->m_slot_freeq_pairp = kmem_zalloc(
	    mpt->m_slot_freeq_pair_n *
	    sizeof (mptsas_slot_freeq_pair_t), KM_SLEEP);
	for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) {
		list_create(&mpt->m_slot_freeq_pairp[i].
		    m_slot_allocq.s.m_fq_list,
		    sizeof (mptsas_slot_free_e_t),
		    offsetof(mptsas_slot_free_e_t, node));
		list_create(&mpt->m_slot_freeq_pairp[i].
		    m_slot_releq.s.m_fq_list,
		    sizeof (mptsas_slot_free_e_t),
		    offsetof(mptsas_slot_free_e_t, node));
		mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n = 0;
		mpt->m_slot_freeq_pairp[i].m_slot_releq.s.m_fq_n = 0;
		mutex_init(&mpt->m_slot_freeq_pairp[i].
		    m_slot_allocq.s.m_fq_mutex, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(mpt->m_intr_pri));
		mutex_init(&mpt->m_slot_freeq_pairp[i].
		    m_slot_releq.s.m_fq_mutex, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(mpt->m_intr_pri));
	}
	pe = mpt->m_slot_free_ae = kmem_zalloc(nslot *
	    sizeof (mptsas_slot_free_e_t), KM_SLEEP);
	/*
	 * An array of Mpi2ReplyDescriptorsUnion_t is defined here.
	 * We are trying to eliminate the m_mutex in the context
	 * reply code path in the ISR. Since the read of the
	 * ReplyDescriptor and update/write of the ReplyIndex must
	 * be atomic (since the poll thread may also update them at
	 * the same time) so we first read out of the ReplyDescriptor
	 * into this array and update the ReplyIndex register with a
	 * separate mutex m_intr_mutex protected, and then release the
	 * mutex and process all of them. the length of the array is
	 * defined as max as 128(128*64=8k), which is
	 * assumed as the maxmium depth of the interrupt coalese.
	 */
	mpt->m_reply = kmem_zalloc(MPI_ADDRESS_COALSCE_MAX *
	    sizeof (Mpi2ReplyDescriptorsUnion_t), KM_SLEEP);
	/* Distribute the free slot entries round-robin over the queues. */
	for (i = 0; i < nslot; i++, pe++) {
		pe->slot = i + 1; /* SMID 0 is reserved */
		pe->cpuid = i % mpt->m_slot_freeq_pair_n;
		list_insert_tail(&mpt->m_slot_freeq_pairp
		    [i % mpt->m_slot_freeq_pair_n]
		    .m_slot_allocq.s.m_fq_list, pe);
		mpt->m_slot_freeq_pairp[i % mpt->m_slot_freeq_pair_n]
		    .m_slot_allocq.s.m_fq_n++;
		mpt->m_slot_freeq_pairp[i % mpt->m_slot_freeq_pair_n]
		    .m_slot_allocq.s.m_fq_n_init++;
	}

	mpt->m_active = new_active;
	rval = 0;

	return (rval);
}
9796
/*
 * Tear down the active-slot bookkeeping built by
 * mptsas_alloc_active_slots(): drain and destroy every per-CPU free
 * queue, free the slot-entry array and reply staging array, then free
 * the slots structure itself and clear mpt->m_active.  No-op if there
 * is no active structure.
 */
static void
mptsas_free_active_slots(mptsas_t *mpt)
{
	mptsas_slots_t	*active = mpt->m_active;
	size_t		size;
	mptsas_slot_free_e_t	*pe;
	int	i;

	if (active == NULL)
		return;

	if (mpt->m_slot_freeq_pairp) {
		for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) {
			/*
			 * Lists must be emptied before list_destroy();
			 * the entries themselves are freed in one chunk
			 * via m_slot_free_ae below.
			 */
			while ((pe = list_head(&mpt->m_slot_freeq_pairp
			    [i].m_slot_allocq.s.m_fq_list)) != NULL) {
				list_remove(&mpt->m_slot_freeq_pairp[i]
				    .m_slot_allocq.s.m_fq_list, pe);
			}
			list_destroy(&mpt->m_slot_freeq_pairp
			    [i].m_slot_allocq.s.m_fq_list);
			while ((pe = list_head(&mpt->m_slot_freeq_pairp
			    [i].m_slot_releq.s.m_fq_list)) != NULL) {
				list_remove(&mpt->m_slot_freeq_pairp[i]
				    .m_slot_releq.s.m_fq_list, pe);
			}
			list_destroy(&mpt->m_slot_freeq_pairp
			    [i].m_slot_releq.s.m_fq_list);
			mutex_destroy(&mpt->m_slot_freeq_pairp
			    [i].m_slot_allocq.s.m_fq_mutex);
			mutex_destroy(&mpt->m_slot_freeq_pairp
			    [i].m_slot_releq.s.m_fq_mutex);
		}
		kmem_free(mpt->m_slot_freeq_pairp, mpt->m_slot_freeq_pair_n *
		    sizeof (mptsas_slot_freeq_pair_t));
	}
	/* m_n_slots must be read before `active` itself is freed. */
	if (mpt->m_slot_free_ae)
		kmem_free(mpt->m_slot_free_ae, mpt->m_active->m_n_slots *
		    sizeof (mptsas_slot_free_e_t));

	if (mpt->m_reply)
		kmem_free(mpt->m_reply, MPI_ADDRESS_COALSCE_MAX *
		    sizeof (Mpi2ReplyDescriptorsUnion_t));

	size = active->m_size;
	kmem_free(active, size);
	mpt->m_active = NULL;
}
9844
9845 /*
9846 * Error logging, printing, and debug print routines.
9847 */
9848 static char *mptsas_label = "mpt_sas";
9849
9850 /*PRINTFLIKE3*/
9851 void
9852 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
9853 {
9854 dev_info_t *dev;
9855 va_list ap;
9856
9857 if (mpt) {
9858 dev = mpt->m_dip;
9859 } else {
9860 dev = 0;
9861 }
9862
9863 mutex_enter(&mptsas_log_mutex);
9864
9865 va_start(ap, fmt);
9866 (void) vsprintf(mptsas_log_buf, fmt, ap);
9867 va_end(ap);
9868
9869 if (level == CE_CONT) {
9870 scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
9871 } else {
9872 scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
9873 }
9874
9875 mutex_exit(&mptsas_log_mutex);
9876 }
9877
#ifdef MPTSAS_DEBUG
/*PRINTFLIKE1*/
/*
 * Debug-build-only printf: formats into the shared log buffer under
 * mptsas_log_mutex and emits either via prom_printf (when PROM_PRINTF
 * is defined) or scsi_log at SCSI_DEBUG level.
 *
 * NOTE(review): vsprintf is unbounded here (as in mptsas_log); callers
 * must keep messages within mptsas_log_buf's capacity.
 */
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t	*dev = 0;
	va_list	ap;

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
#endif
9900
9901 /*
9902 * timeout handling
9903 */
9904 static void
9905 mptsas_watch(void *arg)
9906 {
9907 #ifndef __lock_lint
9908 _NOTE(ARGUNUSED(arg))
9909 #endif
9910
9911 mptsas_t *mpt;
9912 uint32_t doorbell;
9913
9914 NDBG30(("mptsas_watch"));
9915
9916 rw_enter(&mptsas_global_rwlock, RW_READER);
9917 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9918
9919 mutex_enter(&mpt->m_mutex);
9920
9921 /* Skip device if not powered on */
9922 if (mpt->m_options & MPTSAS_OPT_PM) {
9923 if (mpt->m_power_level == PM_LEVEL_D0) {
9924 (void) pm_busy_component(mpt->m_dip, 0);
9925 mpt->m_busy = 1;
9926 } else {
9927 mutex_exit(&mpt->m_mutex);
9928 continue;
9929 }
9930 }
9931
9932 /*
9933 * Check if controller is in a FAULT state. If so, reset it.
9934 */
9935 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9936 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9937 doorbell &= MPI2_DOORBELL_DATA_MASK;
9938 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9939 "code: %04x", doorbell);
9940 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9941 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9942 mptsas_log(mpt, CE_WARN, "Reset failed"
9943 "after fault was detected");
9944 }
9945 }
9946
9947 /*
9948 * For now, always call mptsas_watchsubr.
9949 */
9950 mptsas_watchsubr(mpt);
9951
9952 if (mpt->m_options & MPTSAS_OPT_PM) {
9953 mpt->m_busy = 0;
9954 (void) pm_idle_component(mpt->m_dip, 0);
9955 }
9956
9957 mutex_exit(&mpt->m_mutex);
9958 }
9959 rw_exit(&mptsas_global_rwlock);
9960
9961 mutex_enter(&mptsas_global_mutex);
9962 if (mptsas_timeouts_enabled)
9963 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9964 mutex_exit(&mptsas_global_mutex);
9965 }
9966
/*
 * Per-instance watchdog work, called from mptsas_watch() with m_mutex
 * held.  First ages every occupied slot: non-IOC commands that exceed
 * their active timeout drain the target's throttle, and timed-out
 * passthrough/config/diag commands are flagged and their waiters woken.
 * Then walks all targets to restore throttles after qfull draining and
 * to fire command-timeout recovery when a target's timer expires.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	mutex_enter(&mpt->m_intr_mutex);
	for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					ptgt = cmd->cmd_tgt_addr;
					mutex_enter(&ptgt->m_tgt_intr_mutex);
					mptsas_set_throttle(mpt, ptgt,
					    DRAIN_THROTTLE);
					mutex_exit(&ptgt->m_tgt_intr_mutex);
				}
			}
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					/* Wake every possible waiter type. */
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}
	mutex_exit(&mpt->m_intr_mutex);

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/*
		 * In order to avoid using m_mutex in the key code path in ISR,
		 * separate mutexs are introduced to protect those elements
		 * shown in ISR.
		 */
		mutex_enter(&ptgt->m_tgt_intr_mutex);

		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		if ((ptgt->m_t_ncmds > 0) &&
		    (ptgt->m_timebase)) {

			/*
			 * NOTE(review): while m_timebase is still at or
			 * below one watchdog tick it is bumped and the
			 * timeout checks are skipped — presumably a
			 * warm-up period after (re)arming; confirm
			 * against where m_timebase is initialized.
			 */
			if (ptgt->m_timebase <=
			    mptsas_scsi_watchdog_tick) {
				ptgt->m_timebase +=
				    mptsas_scsi_watchdog_tick;
				mutex_exit(&ptgt->m_tgt_intr_mutex);
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			ptgt->m_timeout -= mptsas_scsi_watchdog_tick;

			/* Timer expired: kick off target reset recovery. */
			if (ptgt->m_timeout < 0) {
				mutex_exit(&ptgt->m_tgt_intr_mutex);
				mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			/* Close to expiry: stop feeding new commands. */
			if ((ptgt->m_timeout) <=
			    mptsas_scsi_watchdog_tick) {
				NDBG23(("pending timeout"));
				mptsas_set_throttle(mpt, ptgt,
				    DRAIN_THROTTLE);
			}
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
}
10080
10081 /*
10082 * timeout recovery
10083 */
10084 static void
10085 mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
10086 {
10087
10088 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
10089 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
10090 "Target %d", devhdl);
10091
10092 /*
10093 * If the current target is not the target passed in,
10094 * try to reset that target.
10095 */
10096 NDBG29(("mptsas_cmd_timeout: device reset"));
10097 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
10098 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
10099 "recovery failed!", devhdl);
10100 }
10101 }
10102
10103 /*
10104 * Device / Hotplug control
10105 */
10106 static int
10107 mptsas_scsi_quiesce(dev_info_t *dip)
10108 {
10109 mptsas_t *mpt;
10110 scsi_hba_tran_t *tran;
10111
10112 tran = ddi_get_driver_private(dip);
10113 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10114 return (-1);
10115
10116 return (mptsas_quiesce_bus(mpt));
10117 }
10118
10119 static int
10120 mptsas_scsi_unquiesce(dev_info_t *dip)
10121 {
10122 mptsas_t *mpt;
10123 scsi_hba_tran_t *tran;
10124
10125 tran = ddi_get_driver_private(dip);
10126 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10127 return (-1);
10128
10129 return (mptsas_unquiesce_bus(mpt));
10130 }
10131
/*
 * Quiesce the bus: hold every target's throttle, then wait for all
 * outstanding commands to drain.  A checkdrain timeout periodically
 * re-checks progress and signals m_cv when the queue empties.  Returns
 * 0 once quiesced, or -1 if the wait is interrupted by a signal (in
 * which case throttles are restored and the HBA restarted).
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
		mutex_exit(&ptgt->m_tgt_intr_mutex);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* If there are any outstanding commands in the queue */
	mutex_enter(&mpt->m_intr_mutex);
	if (mptsas_outstanding_cmds_n(mpt)) {
		mutex_exit(&mpt->m_intr_mutex);
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		/* cv_wait_sig returns 0 when interrupted by a signal. */
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Undo the holds and let traffic flow again. */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mutex_enter(&ptgt->m_tgt_intr_mutex);
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
				mutex_exit(&ptgt->m_tgt_intr_mutex);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}
			mptsas_restart_hba(mpt);
			/*
			 * Cancel any still-pending checkdrain timeout;
			 * m_mutex must be dropped before untimeout() to
			 * avoid deadlocking with the callback.
			 */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	mutex_exit(&mpt->m_intr_mutex);
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
10199
10200 static int
10201 mptsas_unquiesce_bus(mptsas_t *mpt)
10202 {
10203 mptsas_target_t *ptgt = NULL;
10204
10205 NDBG28(("mptsas_unquiesce_bus"));
10206 mutex_enter(&mpt->m_mutex);
10207 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10208 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
10209 MPTSAS_HASH_FIRST);
10210 while (ptgt != NULL) {
10211 mutex_enter(&ptgt->m_tgt_intr_mutex);
10212 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10213 mutex_exit(&ptgt->m_tgt_intr_mutex);
10214
10215 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10216 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
10217 }
10218 mptsas_restart_hba(mpt);
10219 mutex_exit(&mpt->m_mutex);
10220 return (0);
10221 }
10222
/*
 * Checkdrain timeout callback armed by mptsas_quiesce_bus().  While the
 * DRAINING state is set: if commands are still outstanding, re-hold all
 * target throttles (a bus reset may have restored them) and re-arm
 * itself; once the queue is empty, signal m_cv to wake the quiescing
 * thread.
 */
static void
mptsas_ncmds_checkdrain(void *arg)
{
	mptsas_t	*mpt = arg;
	mptsas_target_t	*ptgt = NULL;

	mutex_enter(&mpt->m_mutex);
	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
		/* Mark this timeout as no longer pending. */
		mpt->m_quiesce_timeid = 0;
		mutex_enter(&mpt->m_intr_mutex);
		if (mptsas_outstanding_cmds_n(mpt)) {
			mutex_exit(&mpt->m_intr_mutex);
			/*
			 * The throttle may have been reset because
			 * of a SCSI bus reset
			 */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mutex_enter(&ptgt->m_tgt_intr_mutex);
				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
				mutex_exit(&ptgt->m_tgt_intr_mutex);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}

			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
			    drv_usectohz(1000000)));
		} else {
			mutex_exit(&mpt->m_intr_mutex);
			/* Command queue has been drained */
			cv_signal(&mpt->m_cv);
		}
	}
	mutex_exit(&mpt->m_mutex);
}
10261
10262 /*ARGSUSED*/
10263 static void
10264 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10265 {
10266 int i;
10267 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10268 char buf[128];
10269
10270 buf[0] = '\0';
10271 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10272 Tgt(cmd), Lun(cmd)));
10273 (void) sprintf(&buf[0], "\tcdb=[");
10274 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10275 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10276 }
10277 (void) sprintf(&buf[strlen(buf)], " ]");
10278 NDBG25(("?%s\n", buf));
10279 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10280 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10281 cmd->cmd_pkt->pkt_state));
10282 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10283 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10284 }
10285
/*
 * Build and issue a passthrough request to the IOC.  Copies the caller's
 * request frame into the slot's message frame, appends simple 64-bit
 * SGEs for the data-in and/or data-out buffers, fixes up SCSI-IO-
 * specific fields (sense buffer, SGL offset, request descriptor), then
 * fires the command via MPTSAS_START_CMD.  Completion is asynchronous;
 * the caller waits for it elsewhere.
 */
static void
mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	caddr_t			memp;
	pMPI2RequestHeader_t	request_hdrp;
	struct scsi_pkt		*pkt = cmd->cmd_pkt;
	mptsas_pt_request_t	*pt = pkt->pkt_ha_private;
	uint32_t		request_size, data_size, dataout_size;
	uint32_t		direction;
	ddi_dma_cookie_t	data_cookie;
	ddi_dma_cookie_t	dataout_cookie;
	uint32_t		request_desc_low, request_desc_high = 0;
	uint32_t		i, sense_bufp;
	uint8_t			desc_type;
	uint8_t			*request, function;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;

	desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;

	request = pt->request;
	direction = pt->direction;
	request_size = pt->request_size;
	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	/*
	 * Store the passthrough message in memory location
	 * corresponding to our slot number
	 */
	memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
	request_hdrp = (pMPI2RequestHeader_t)memp;
	bzero(memp, mpt->m_req_frame_size);

	/*
	 * NOTE(review): byte-at-a-time bcopy; a single
	 * bcopy(request, memp, request_size) looks equivalent.
	 */
	for (i = 0; i < request_size; i++) {
		bcopy(request + i, memp + i, 1);
	}

	if (data_size || dataout_size) {
		pMpi2SGESimple64_t	sgep;
		uint32_t		sge_flags;

		/* SGEs live immediately after the copied request frame. */
		sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
		    request_size);
		if (dataout_size) {

			/* Host-to-IOC SGE for the data-out buffer. */
			sge_flags = dataout_size |
			    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_END_OF_BUFFER |
			    MPI2_SGE_FLAGS_HOST_TO_IOC |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);
			ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
			ddi_put32(acc_hdl, &sgep->Address.Low,
			    (uint32_t)(dataout_cookie.dmac_laddress &
			    0xffffffffull));
			ddi_put32(acc_hdl, &sgep->Address.High,
			    (uint32_t)(dataout_cookie.dmac_laddress
			    >> 32));
			sgep++;
		}
		/* Final SGE for the data buffer; direction depends on pt. */
		sge_flags = data_size;
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
			    MPI2_SGE_FLAGS_SHIFT);
		} else {
			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
			    MPI2_SGE_FLAGS_SHIFT);
		}
		ddi_put32(acc_hdl, &sgep->FlagsLength,
		    sge_flags);
		ddi_put32(acc_hdl, &sgep->Address.Low,
		    (uint32_t)(data_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &sgep->Address.High,
		    (uint32_t)(data_cookie.dmac_laddress >> 32));
	}

	function = request_hdrp->Function;
	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		pMpi2SCSIIORequest_t	scsi_io_req;

		scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
		/*
		 * Put SGE for data and data_out buffer at the end of
		 * scsi_io_request message header.(64 bytes in total)
		 * Following above SGEs, the residual space will be
		 * used by sense data.
		 */
		ddi_put8(acc_hdl,
		    &scsi_io_req->SenseBufferLength,
		    (uint8_t)(request_size - 64));

		/* Sense buffer is the slot's frame memory past byte 64. */
		sense_bufp = mpt->m_req_frame_dma_addr +
		    (mpt->m_req_frame_size * cmd->cmd_slot);
		sense_bufp += 64;
		ddi_put32(acc_hdl,
		    &scsi_io_req->SenseBufferLowAddress, sense_bufp);

		/*
		 * Set SGLOffset0 value
		 */
		ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
		    offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);

		/*
		 * Setup descriptor info.  RAID passthrough must use the
		 * default request descriptor which is already set, so if this
		 * is a SCSI IO request, change the descriptor to SCSI IO.
		 */
		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
			desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
			request_desc_high = (ddi_get16(acc_hdl,
			    &scsi_io_req->DevHandle) << 16);
		}
	}

	/*
	 * We must wait till the message has been completed before
	 * beginning the next message so we wait for this one to
	 * finish.
	 */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) + desc_type;
	cmd->cmd_rfm = NULL;
	mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
	/* Post-submit FMA check on the DMA and access handles. */
	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
10427
10428
10429
10430 static int
10431 mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
10432 uint8_t *data, uint32_t request_size, uint32_t reply_size,
10433 uint32_t data_size, uint32_t direction, uint8_t *dataout,
10434 uint32_t dataout_size, short timeout, int mode)
10435 {
10436 mptsas_pt_request_t pt;
10437 mptsas_dma_alloc_state_t data_dma_state;
10438 mptsas_dma_alloc_state_t dataout_dma_state;
10439 caddr_t memp;
10440 mptsas_cmd_t *cmd = NULL;
10441 struct scsi_pkt *pkt;
10442 uint32_t reply_len = 0, sense_len = 0;
10443 pMPI2RequestHeader_t request_hdrp;
10444 pMPI2RequestHeader_t request_msg;
10445 pMPI2DefaultReply_t reply_msg;
10446 Mpi2SCSIIOReply_t rep_msg;
10447 int i, status = 0, pt_flags = 0, rv = 0;
10448 int rvalue;
10449 uint8_t function;
10450
10451 ASSERT(mutex_owned(&mpt->m_mutex));
10452
10453 reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
10454 bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
10455 request_msg = kmem_zalloc(request_size, KM_SLEEP);
10456
10457 mutex_exit(&mpt->m_mutex);
10458 /*
10459 * copy in the request buffer since it could be used by
10460 * another thread when the pt request into waitq
10461 */
10462 if (ddi_copyin(request, request_msg, request_size, mode)) {
10463 mutex_enter(&mpt->m_mutex);
10464 status = EFAULT;
10465 mptsas_log(mpt, CE_WARN, "failed to copy request data");
10466 goto out;
10467 }
10468 mutex_enter(&mpt->m_mutex);
10469
10470 function = request_msg->Function;
10471 if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
10472 pMpi2SCSITaskManagementRequest_t task;
10473 task = (pMpi2SCSITaskManagementRequest_t)request_msg;
10474 mptsas_setup_bus_reset_delay(mpt);
10475 rv = mptsas_ioc_task_management(mpt, task->TaskType,
10476 task->DevHandle, (int)task->LUN[1], reply, reply_size,
10477 mode);
10478
10479 if (rv != TRUE) {
10480 status = EIO;
10481 mptsas_log(mpt, CE_WARN, "task management failed");
10482 }
10483 goto out;
10484 }
10485
10486 if (data_size != 0) {
10487 data_dma_state.size = data_size;
10488 if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
10489 status = ENOMEM;
10490 mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
10491 "resource");
10492 goto out;
10493 }
10494 pt_flags |= MPTSAS_DATA_ALLOCATED;
10495 if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10496 mutex_exit(&mpt->m_mutex);
10497 for (i = 0; i < data_size; i++) {
10498 if (ddi_copyin(data + i, (uint8_t *)
10499 data_dma_state.memp + i, 1, mode)) {
10500 mutex_enter(&mpt->m_mutex);
10501 status = EFAULT;
10502 mptsas_log(mpt, CE_WARN, "failed to "
10503 "copy read data");
10504 goto out;
10505 }
10506 }
10507 mutex_enter(&mpt->m_mutex);
10508 }
10509 }
10510
10511 if (dataout_size != 0) {
10512 dataout_dma_state.size = dataout_size;
10513 if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
10514 status = ENOMEM;
10515 mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
10516 "resource");
10517 goto out;
10518 }
10519 pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
10520 mutex_exit(&mpt->m_mutex);
10521 for (i = 0; i < dataout_size; i++) {
10522 if (ddi_copyin(dataout + i, (uint8_t *)
10523 dataout_dma_state.memp + i, 1, mode)) {
10524 mutex_enter(&mpt->m_mutex);
10525 mptsas_log(mpt, CE_WARN, "failed to copy out"
10526 " data");
10527 status = EFAULT;
10528 goto out;
10529 }
10530 }
10531 mutex_enter(&mpt->m_mutex);
10532 }
10533
10534 if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10535 status = EAGAIN;
10536 mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
10537 goto out;
10538 }
10539 pt_flags |= MPTSAS_REQUEST_POOL_CMD;
10540
10541 bzero((caddr_t)cmd, sizeof (*cmd));
10542 bzero((caddr_t)pkt, scsi_pkt_size());
10543 bzero((caddr_t)&pt, sizeof (pt));
10544
10545 cmd->ioc_cmd_slot = (uint32_t)(rvalue);
10546
10547 pt.request = (uint8_t *)request_msg;
10548 pt.direction = direction;
10549 pt.request_size = request_size;
10550 pt.data_size = data_size;
10551 pt.dataout_size = dataout_size;
10552 pt.data_cookie = data_dma_state.cookie;
10553 pt.dataout_cookie = dataout_dma_state.cookie;
10554
10555 /*
10556 * Form a blank cmd/pkt to store the acknowledgement message
10557 */
10558 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
10559 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
10560 pkt->pkt_ha_private = (opaque_t)&pt;
10561 pkt->pkt_flags = FLAG_HEAD;
10562 pkt->pkt_time = timeout;
10563 cmd->cmd_pkt = pkt;
10564 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;
10565
10566 /*
10567 * Save the command in a slot
10568 */
10569 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10570 /*
10571 * Once passthru command get slot, set cmd_flags
10572 * CFLAG_PREPARED.
10573 */
10574 cmd->cmd_flags |= CFLAG_PREPARED;
10575 mptsas_start_passthru(mpt, cmd);
10576 } else {
10577 mptsas_waitq_add(mpt, cmd);
10578 }
10579
10580 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10581 cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
10582 }
10583
10584 if (cmd->cmd_flags & CFLAG_PREPARED) {
10585 memp = mpt->m_req_frame + (mpt->m_req_frame_size *
10586 cmd->cmd_slot);
10587 request_hdrp = (pMPI2RequestHeader_t)memp;
10588 }
10589
10590 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10591 status = ETIMEDOUT;
10592 mptsas_log(mpt, CE_WARN, "passthrough command timeout");
10593 pt_flags |= MPTSAS_CMD_TIMEOUT;
10594 goto out;
10595 }
10596
10597 if (cmd->cmd_rfm) {
10598 /*
10599 * cmd_rfm is zero means the command reply is a CONTEXT
10600 * reply and no PCI Write to post the free reply SMFA
10601 * because no reply message frame is used.
10602 * cmd_rfm is non-zero means the reply is a ADDRESS
10603 * reply and reply message frame is used.
10604 */
10605 pt_flags |= MPTSAS_ADDRESS_REPLY;
10606 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10607 DDI_DMA_SYNC_FORCPU);
10608 reply_msg = (pMPI2DefaultReply_t)
10609 (mpt->m_reply_frame + (cmd->cmd_rfm -
10610 mpt->m_reply_frame_dma_addr));
10611 }
10612
10613 mptsas_fma_check(mpt, cmd);
10614 if (pkt->pkt_reason == CMD_TRAN_ERR) {
10615 status = EAGAIN;
10616 mptsas_log(mpt, CE_WARN, "passthru fma error");
10617 goto out;
10618 }
10619 if (pkt->pkt_reason == CMD_RESET) {
10620 status = EAGAIN;
10621 mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
10622 goto out;
10623 }
10624
10625 if (pkt->pkt_reason == CMD_INCOMPLETE) {
10626 status = EIO;
10627 mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
10628 goto out;
10629 }
10630
10631 mutex_exit(&mpt->m_mutex);
10632 if (cmd->cmd_flags & CFLAG_PREPARED) {
10633 function = request_hdrp->Function;
10634 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10635 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10636 reply_len = sizeof (MPI2_SCSI_IO_REPLY);
10637 sense_len = reply_size - reply_len;
10638 } else {
10639 reply_len = reply_size;
10640 sense_len = 0;
10641 }
10642
10643 for (i = 0; i < reply_len; i++) {
10644 if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
10645 mode)) {
10646 mutex_enter(&mpt->m_mutex);
10647 status = EFAULT;
10648 mptsas_log(mpt, CE_WARN, "failed to copy out "
10649 "reply data");
10650 goto out;
10651 }
10652 }
10653 for (i = 0; i < sense_len; i++) {
10654 if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
10655 reply + reply_len + i, 1, mode)) {
10656 mutex_enter(&mpt->m_mutex);
10657 status = EFAULT;
10658 mptsas_log(mpt, CE_WARN, "failed to copy out "
10659 "sense data");
10660 goto out;
10661 }
10662 }
10663 }
10664
10665 if (data_size) {
10666 if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10667 (void) ddi_dma_sync(data_dma_state.handle, 0, 0,
10668 DDI_DMA_SYNC_FORCPU);
10669 for (i = 0; i < data_size; i++) {
10670 if (ddi_copyout((uint8_t *)(
10671 data_dma_state.memp + i), data + i, 1,
10672 mode)) {
10673 mutex_enter(&mpt->m_mutex);
10674 status = EFAULT;
10675 mptsas_log(mpt, CE_WARN, "failed to "
10676 "copy out the reply data");
10677 goto out;
10678 }
10679 }
10680 }
10681 }
10682 mutex_enter(&mpt->m_mutex);
10683 out:
10684 /*
10685 * Put the reply frame back on the free queue, increment the free
10686 * index, and write the new index to the free index register. But only
10687 * if this reply is an ADDRESS reply.
10688 */
10689 if (pt_flags & MPTSAS_ADDRESS_REPLY) {
10690 ddi_put32(mpt->m_acc_free_queue_hdl,
10691 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10692 cmd->cmd_rfm);
10693 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10694 DDI_DMA_SYNC_FORDEV);
10695 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10696 mpt->m_free_index = 0;
10697 }
10698 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10699 mpt->m_free_index);
10700 }
10701 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10702 mptsas_remove_cmd(mpt, cmd);
10703 pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10704 }
10705 if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
10706 mptsas_return_to_pool(mpt, cmd);
10707 if (pt_flags & MPTSAS_DATA_ALLOCATED) {
10708 if (mptsas_check_dma_handle(data_dma_state.handle) !=
10709 DDI_SUCCESS) {
10710 ddi_fm_service_impact(mpt->m_dip,
10711 DDI_SERVICE_UNAFFECTED);
10712 status = EFAULT;
10713 }
10714 mptsas_dma_free(&data_dma_state);
10715 }
10716 if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
10717 if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
10718 DDI_SUCCESS) {
10719 ddi_fm_service_impact(mpt->m_dip,
10720 DDI_SERVICE_UNAFFECTED);
10721 status = EFAULT;
10722 }
10723 mptsas_dma_free(&dataout_dma_state);
10724 }
10725 if (pt_flags & MPTSAS_CMD_TIMEOUT) {
10726 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10727 mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
10728 }
10729 }
10730 if (request_msg)
10731 kmem_free(request_msg, request_size);
10732
10733 return (status);
10734 }
10735
10736 static int
10737 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10738 {
10739 /*
10740 * If timeout is 0, set timeout to default of 60 seconds.
10741 */
10742 if (data->Timeout == 0) {
10743 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10744 }
10745
10746 if (((data->DataSize == 0) &&
10747 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10748 ((data->DataSize != 0) &&
10749 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10750 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10751 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10752 (data->DataOutSize != 0))))) {
10753 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10754 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10755 } else {
10756 data->DataOutSize = 0;
10757 }
10758 /*
10759 * Send passthru request messages
10760 */
10761 return (mptsas_do_passthru(mpt,
10762 (uint8_t *)((uintptr_t)data->PtrRequest),
10763 (uint8_t *)((uintptr_t)data->PtrReply),
10764 (uint8_t *)((uintptr_t)data->PtrData),
10765 data->RequestSize, data->ReplySize,
10766 data->DataSize, data->DataDirection,
10767 (uint8_t *)((uintptr_t)data->PtrDataOut),
10768 data->DataOutSize, data->Timeout, mode));
10769 } else {
10770 return (EINVAL);
10771 }
10772 }
10773
10774 static uint8_t
10775 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
10776 {
10777 uint8_t index;
10778
10779 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
10780 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
10781 return (index);
10782 }
10783 }
10784
10785 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
10786 }
10787
/*
 * Build and fire a FW diagnostic request (buffer POST or RELEASE) into
 * the command's request frame and start it on the hardware.
 *
 * The diag request parameters arrive via pkt->pkt_ha_private, set up by
 * mptsas_post_fw_diag_buffer()/mptsas_release_fw_diag_buffer().  Called
 * with mpt->m_mutex held.  No return value; FMA impact is reported if a
 * post-start handle check fails.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t pDiag_post_msg;
	pMpi2DiagReleaseRequest_t pDiag_release_msg;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	mptsas_diag_request_t *diag = pkt->pkt_ha_private;
	uint32_t request_desc_low, i;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		/* Locate this command's request frame and zero it. */
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		/* All frame fields are written through the DMA access handle. */
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* Copy the caller's product-specific words, 32 bits at a time. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* 64-bit DMA address of the diag buffer, split low/high. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/* RELEASE: a much smaller frame — just function and type. */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	/* Flush the request frame to the device before ringing the doorbell. */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	/* Default request descriptor: SMID in the upper 16 bits. */
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
10860
10861 static int
10862 mptsas_post_fw_diag_buffer(mptsas_t *mpt,
10863 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
10864 {
10865 mptsas_diag_request_t diag;
10866 int status, slot_num, post_flags = 0;
10867 mptsas_cmd_t *cmd = NULL;
10868 struct scsi_pkt *pkt;
10869 pMpi2DiagBufferPostReply_t reply;
10870 uint16_t iocstatus;
10871 uint32_t iocloginfo, transfer_length;
10872
10873 /*
10874 * If buffer is not enabled, just leave.
10875 */
10876 *return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
10877 if (!pBuffer->enabled) {
10878 status = DDI_FAILURE;
10879 goto out;
10880 }
10881
10882 /*
10883 * Clear some flags initially.
10884 */
10885 pBuffer->force_release = FALSE;
10886 pBuffer->valid_data = FALSE;
10887 pBuffer->owned_by_firmware = FALSE;
10888
10889 /*
10890 * Get a cmd buffer from the cmd buffer pool
10891 */
10892 if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10893 status = DDI_FAILURE;
10894 mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
10895 goto out;
10896 }
10897 post_flags |= MPTSAS_REQUEST_POOL_CMD;
10898
10899 bzero((caddr_t)cmd, sizeof (*cmd));
10900 bzero((caddr_t)pkt, scsi_pkt_size());
10901
10902 cmd->ioc_cmd_slot = (uint32_t)(slot_num);
10903
10904 diag.pBuffer = pBuffer;
10905 diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;
10906
10907 /*
10908 * Form a blank cmd/pkt to store the acknowledgement message
10909 */
10910 pkt->pkt_ha_private = (opaque_t)&diag;
10911 pkt->pkt_flags = FLAG_HEAD;
10912 pkt->pkt_time = 60;
10913 cmd->cmd_pkt = pkt;
10914 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;
10915
10916 /*
10917 * Save the command in a slot
10918 */
10919 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10920 /*
10921 * Once passthru command get slot, set cmd_flags
10922 * CFLAG_PREPARED.
10923 */
10924 cmd->cmd_flags |= CFLAG_PREPARED;
10925 mptsas_start_diag(mpt, cmd);
10926 } else {
10927 mptsas_waitq_add(mpt, cmd);
10928 }
10929
10930 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10931 cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
10932 }
10933
10934 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10935 status = DDI_FAILURE;
10936 mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
10937 goto out;
10938 }
10939
10940 /*
10941 * cmd_rfm points to the reply message if a reply was given. Check the
10942 * IOCStatus to make sure everything went OK with the FW diag request
10943 * and set buffer flags.
10944 */
10945 if (cmd->cmd_rfm) {
10946 post_flags |= MPTSAS_ADDRESS_REPLY;
10947 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10948 DDI_DMA_SYNC_FORCPU);
10949 reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
10950 (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
10951
10952 /*
10953 * Get the reply message data
10954 */
10955 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
10956 &reply->IOCStatus);
10957 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
10958 &reply->IOCLogInfo);
10959 transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
10960 &reply->TransferLength);
10961
10962 /*
10963 * If post failed quit.
10964 */
10965 if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
10966 status = DDI_FAILURE;
10967 NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
10968 "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
10969 iocloginfo, transfer_length));
10970 goto out;
10971 }
10972
10973 /*
10974 * Post was successful.
10975 */
10976 pBuffer->valid_data = TRUE;
10977 pBuffer->owned_by_firmware = TRUE;
10978 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10979 status = DDI_SUCCESS;
10980 }
10981
10982 out:
10983 /*
10984 * Put the reply frame back on the free queue, increment the free
10985 * index, and write the new index to the free index register. But only
10986 * if this reply is an ADDRESS reply.
10987 */
10988 if (post_flags & MPTSAS_ADDRESS_REPLY) {
10989 ddi_put32(mpt->m_acc_free_queue_hdl,
10990 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10991 cmd->cmd_rfm);
10992 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10993 DDI_DMA_SYNC_FORDEV);
10994 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10995 mpt->m_free_index = 0;
10996 }
10997 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10998 mpt->m_free_index);
10999 }
11000 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
11001 mptsas_remove_cmd(mpt, cmd);
11002 post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
11003 }
11004 if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
11005 mptsas_return_to_pool(mpt, cmd);
11006 }
11007
11008 return (status);
11009 }
11010
11011 static int
11012 mptsas_release_fw_diag_buffer(mptsas_t *mpt,
11013 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
11014 uint32_t diag_type)
11015 {
11016 mptsas_diag_request_t diag;
11017 int status, slot_num, rel_flags = 0;
11018 mptsas_cmd_t *cmd = NULL;
11019 struct scsi_pkt *pkt;
11020 pMpi2DiagReleaseReply_t reply;
11021 uint16_t iocstatus;
11022 uint32_t iocloginfo;
11023
11024 /*
11025 * If buffer is not enabled, just leave.
11026 */
11027 *return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
11028 if (!pBuffer->enabled) {
11029 mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
11030 "by the IOC");
11031 status = DDI_FAILURE;
11032 goto out;
11033 }
11034
11035 /*
11036 * Clear some flags initially.
11037 */
11038 pBuffer->force_release = FALSE;
11039 pBuffer->valid_data = FALSE;
11040 pBuffer->owned_by_firmware = FALSE;
11041
11042 /*
11043 * Get a cmd buffer from the cmd buffer pool
11044 */
11045 if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
11046 status = DDI_FAILURE;
11047 mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
11048 "Diag");
11049 goto out;
11050 }
11051 rel_flags |= MPTSAS_REQUEST_POOL_CMD;
11052
11053 bzero((caddr_t)cmd, sizeof (*cmd));
11054 bzero((caddr_t)pkt, scsi_pkt_size());
11055
11056 cmd->ioc_cmd_slot = (uint32_t)(slot_num);
11057
11058 diag.pBuffer = pBuffer;
11059 diag.function = MPI2_FUNCTION_DIAG_RELEASE;
11060
11061 /*
11062 * Form a blank cmd/pkt to store the acknowledgement message
11063 */
11064 pkt->pkt_ha_private = (opaque_t)&diag;
11065 pkt->pkt_flags = FLAG_HEAD;
11066 pkt->pkt_time = 60;
11067 cmd->cmd_pkt = pkt;
11068 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;
11069
11070 /*
11071 * Save the command in a slot
11072 */
11073 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
11074 /*
11075 * Once passthru command get slot, set cmd_flags
11076 * CFLAG_PREPARED.
11077 */
11078 cmd->cmd_flags |= CFLAG_PREPARED;
11079 mptsas_start_diag(mpt, cmd);
11080 } else {
11081 mptsas_waitq_add(mpt, cmd);
11082 }
11083
11084 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
11085 cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
11086 }
11087
11088 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
11089 status = DDI_FAILURE;
11090 mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
11091 goto out;
11092 }
11093
11094 /*
11095 * cmd_rfm points to the reply message if a reply was given. Check the
11096 * IOCStatus to make sure everything went OK with the FW diag request
11097 * and set buffer flags.
11098 */
11099 if (cmd->cmd_rfm) {
11100 rel_flags |= MPTSAS_ADDRESS_REPLY;
11101 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
11102 DDI_DMA_SYNC_FORCPU);
11103 reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
11104 (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
11105
11106 /*
11107 * Get the reply message data
11108 */
11109 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
11110 &reply->IOCStatus);
11111 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
11112 &reply->IOCLogInfo);
11113
11114 /*
11115 * If release failed quit.
11116 */
11117 if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
11118 pBuffer->owned_by_firmware) {
11119 status = DDI_FAILURE;
11120 NDBG13(("release FW Diag Buffer failed: "
11121 "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
11122 iocloginfo));
11123 goto out;
11124 }
11125
11126 /*
11127 * Release was successful.
11128 */
11129 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11130 status = DDI_SUCCESS;
11131
11132 /*
11133 * If this was for an UNREGISTER diag type command, clear the
11134 * unique ID.
11135 */
11136 if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
11137 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
11138 }
11139 }
11140
11141 out:
11142 /*
11143 * Put the reply frame back on the free queue, increment the free
11144 * index, and write the new index to the free index register. But only
11145 * if this reply is an ADDRESS reply.
11146 */
11147 if (rel_flags & MPTSAS_ADDRESS_REPLY) {
11148 ddi_put32(mpt->m_acc_free_queue_hdl,
11149 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
11150 cmd->cmd_rfm);
11151 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11152 DDI_DMA_SYNC_FORDEV);
11153 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
11154 mpt->m_free_index = 0;
11155 }
11156 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
11157 mpt->m_free_index);
11158 }
11159 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
11160 mptsas_remove_cmd(mpt, cmd);
11161 rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
11162 }
11163 if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
11164 mptsas_return_to_pool(mpt, cmd);
11165 }
11166
11167 return (status);
11168 }
11169
11170 static int
11171 mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
11172 uint32_t *return_code)
11173 {
11174 mptsas_fw_diagnostic_buffer_t *pBuffer;
11175 uint8_t extended_type, buffer_type, i;
11176 uint32_t buffer_size;
11177 uint32_t unique_id;
11178 int status;
11179
11180 ASSERT(mutex_owned(&mpt->m_mutex));
11181
11182 extended_type = diag_register->ExtendedType;
11183 buffer_type = diag_register->BufferType;
11184 buffer_size = diag_register->RequestedBufferSize;
11185 unique_id = diag_register->UniqueId;
11186
11187 /*
11188 * Check for valid buffer type
11189 */
11190 if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
11191 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11192 return (DDI_FAILURE);
11193 }
11194
11195 /*
11196 * Get the current buffer and look up the unique ID. The unique ID
11197 * should not be found. If it is, the ID is already in use.
11198 */
11199 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11200 pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
11201 if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11202 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11203 return (DDI_FAILURE);
11204 }
11205
11206 /*
11207 * The buffer's unique ID should not be registered yet, and the given
11208 * unique ID cannot be 0.
11209 */
11210 if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
11211 (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
11212 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11213 return (DDI_FAILURE);
11214 }
11215
11216 /*
11217 * If this buffer is already posted as immediate, just change owner.
11218 */
11219 if (pBuffer->immediate && pBuffer->owned_by_firmware &&
11220 (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
11221 pBuffer->immediate = FALSE;
11222 pBuffer->unique_id = unique_id;
11223 return (DDI_SUCCESS);
11224 }
11225
11226 /*
11227 * Post a new buffer after checking if it's enabled. The DMA buffer
11228 * that is allocated will be contiguous (sgl_len = 1).
11229 */
11230 if (!pBuffer->enabled) {
11231 *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
11232 return (DDI_FAILURE);
11233 }
11234 bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
11235 pBuffer->buffer_data.size = buffer_size;
11236 if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
11237 mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
11238 "diag buffer: size = %d bytes", buffer_size);
11239 *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
11240 return (DDI_FAILURE);
11241 }
11242
11243 /*
11244 * Copy the given info to the diag buffer and post the buffer.
11245 */
11246 pBuffer->buffer_type = buffer_type;
11247 pBuffer->immediate = FALSE;
11248 if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
11249 for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
11250 i++) {
11251 pBuffer->product_specific[i] =
11252 diag_register->ProductSpecific[i];
11253 }
11254 }
11255 pBuffer->extended_type = extended_type;
11256 pBuffer->unique_id = unique_id;
11257 status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);
11258
11259 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
11260 DDI_SUCCESS) {
11261 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
11262 "mptsas_diag_register.");
11263 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
11264 status = DDI_FAILURE;
11265 }
11266
11267 /*
11268 * In case there was a failure, free the DMA buffer.
11269 */
11270 if (status == DDI_FAILURE) {
11271 mptsas_dma_free(&pBuffer->buffer_data);
11272 }
11273
11274 return (status);
11275 }
11276
11277 static int
11278 mptsas_diag_unregister(mptsas_t *mpt,
11279 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
11280 {
11281 mptsas_fw_diagnostic_buffer_t *pBuffer;
11282 uint8_t i;
11283 uint32_t unique_id;
11284 int status;
11285
11286 ASSERT(mutex_owned(&mpt->m_mutex));
11287
11288 unique_id = diag_unregister->UniqueId;
11289
11290 /*
11291 * Get the current buffer and look up the unique ID. The unique ID
11292 * should be there.
11293 */
11294 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11295 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11296 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11297 return (DDI_FAILURE);
11298 }
11299
11300 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11301
11302 /*
11303 * Try to release the buffer from FW before freeing it. If release
11304 * fails, don't free the DMA buffer in case FW tries to access it
11305 * later. If buffer is not owned by firmware, can't release it.
11306 */
11307 if (!pBuffer->owned_by_firmware) {
11308 status = DDI_SUCCESS;
11309 } else {
11310 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
11311 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
11312 }
11313
11314 /*
11315 * At this point, return the current status no matter what happens with
11316 * the DMA buffer.
11317 */
11318 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
11319 if (status == DDI_SUCCESS) {
11320 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
11321 DDI_SUCCESS) {
11322 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
11323 "in mptsas_diag_unregister.");
11324 ddi_fm_service_impact(mpt->m_dip,
11325 DDI_SERVICE_UNAFFECTED);
11326 }
11327 mptsas_dma_free(&pBuffer->buffer_data);
11328 }
11329
11330 return (status);
11331 }
11332
11333 static int
11334 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
11335 uint32_t *return_code)
11336 {
11337 mptsas_fw_diagnostic_buffer_t *pBuffer;
11338 uint8_t i;
11339 uint32_t unique_id;
11340
11341 ASSERT(mutex_owned(&mpt->m_mutex));
11342
11343 unique_id = diag_query->UniqueId;
11344
11345 /*
11346 * If ID is valid, query on ID.
11347 * If ID is invalid, query on buffer type.
11348 */
11349 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
11350 i = diag_query->BufferType;
11351 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
11352 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11353 return (DDI_FAILURE);
11354 }
11355 } else {
11356 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11357 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11358 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11359 return (DDI_FAILURE);
11360 }
11361 }
11362
11363 /*
11364 * Fill query structure with the diag buffer info.
11365 */
11366 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11367 diag_query->BufferType = pBuffer->buffer_type;
11368 diag_query->ExtendedType = pBuffer->extended_type;
11369 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
11370 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
11371 i++) {
11372 diag_query->ProductSpecific[i] =
11373 pBuffer->product_specific[i];
11374 }
11375 }
11376 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
11377 diag_query->DriverAddedBufferSize = 0;
11378 diag_query->UniqueId = pBuffer->unique_id;
11379 diag_query->ApplicationFlags = 0;
11380 diag_query->DiagnosticFlags = 0;
11381
11382 /*
11383 * Set/Clear application flags
11384 */
11385 if (pBuffer->immediate) {
11386 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11387 } else {
11388 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11389 }
11390 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
11391 diag_query->ApplicationFlags |=
11392 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11393 } else {
11394 diag_query->ApplicationFlags &=
11395 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11396 }
11397 if (pBuffer->owned_by_firmware) {
11398 diag_query->ApplicationFlags |=
11399 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11400 } else {
11401 diag_query->ApplicationFlags &=
11402 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11403 }
11404
11405 return (DDI_SUCCESS);
11406 }
11407
11408 static int
11409 mptsas_diag_read_buffer(mptsas_t *mpt,
11410 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
11411 uint32_t *return_code, int ioctl_mode)
11412 {
11413 mptsas_fw_diagnostic_buffer_t *pBuffer;
11414 uint8_t i, *pData;
11415 uint32_t unique_id, byte;
11416 int status;
11417
11418 ASSERT(mutex_owned(&mpt->m_mutex));
11419
11420 unique_id = diag_read_buffer->UniqueId;
11421
11422 /*
11423 * Get the current buffer and look up the unique ID. The unique ID
11424 * should be there.
11425 */
11426 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11427 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11428 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11429 return (DDI_FAILURE);
11430 }
11431
11432 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11433
11434 /*
11435 * Make sure requested read is within limits
11436 */
11437 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
11438 pBuffer->buffer_data.size) {
11439 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11440 return (DDI_FAILURE);
11441 }
11442
11443 /*
11444 * Copy the requested data from DMA to the diag_read_buffer. The DMA
11445 * buffer that was allocated is one contiguous buffer.
11446 */
11447 pData = (uint8_t *)(pBuffer->buffer_data.memp +
11448 diag_read_buffer->StartingOffset);
11449 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
11450 DDI_DMA_SYNC_FORCPU);
11451 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
11452 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
11453 != 0) {
11454 return (DDI_FAILURE);
11455 }
11456 }
11457 diag_read_buffer->Status = 0;
11458
11459 /*
11460 * Set or clear the Force Release flag.
11461 */
11462 if (pBuffer->force_release) {
11463 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11464 } else {
11465 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11466 }
11467
11468 /*
11469 * If buffer is to be reregistered, make sure it's not already owned by
11470 * firmware first.
11471 */
11472 status = DDI_SUCCESS;
11473 if (!pBuffer->owned_by_firmware) {
11474 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
11475 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
11476 return_code);
11477 }
11478 }
11479
11480 return (status);
11481 }
11482
11483 static int
11484 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
11485 uint32_t *return_code)
11486 {
11487 mptsas_fw_diagnostic_buffer_t *pBuffer;
11488 uint8_t i;
11489 uint32_t unique_id;
11490 int status;
11491
11492 ASSERT(mutex_owned(&mpt->m_mutex));
11493
11494 unique_id = diag_release->UniqueId;
11495
11496 /*
11497 * Get the current buffer and look up the unique ID. The unique ID
11498 * should be there.
11499 */
11500 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11501 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11502 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11503 return (DDI_FAILURE);
11504 }
11505
11506 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11507
11508 /*
11509 * If buffer is not owned by firmware, it's already been released.
11510 */
11511 if (!pBuffer->owned_by_firmware) {
11512 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
11513 return (DDI_FAILURE);
11514 }
11515
11516 /*
11517 * Release the buffer.
11518 */
11519 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
11520 MPTSAS_FW_DIAG_TYPE_RELEASE);
11521 return (status);
11522 }
11523
11524 static int
11525 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
11526 uint32_t length, uint32_t *return_code, int ioctl_mode)
11527 {
11528 mptsas_fw_diag_register_t diag_register;
11529 mptsas_fw_diag_unregister_t diag_unregister;
11530 mptsas_fw_diag_query_t diag_query;
11531 mptsas_diag_read_buffer_t diag_read_buffer;
11532 mptsas_fw_diag_release_t diag_release;
11533 int status = DDI_SUCCESS;
11534 uint32_t original_return_code, read_buf_len;
11535
11536 ASSERT(mutex_owned(&mpt->m_mutex));
11537
11538 original_return_code = *return_code;
11539 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11540
11541 switch (action) {
11542 case MPTSAS_FW_DIAG_TYPE_REGISTER:
11543 if (!length) {
11544 *return_code =
11545 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11546 status = DDI_FAILURE;
11547 break;
11548 }
11549 if (ddi_copyin(diag_action, &diag_register,
11550 sizeof (diag_register), ioctl_mode) != 0) {
11551 return (DDI_FAILURE);
11552 }
11553 status = mptsas_diag_register(mpt, &diag_register,
11554 return_code);
11555 break;
11556
11557 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
11558 if (length < sizeof (diag_unregister)) {
11559 *return_code =
11560 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11561 status = DDI_FAILURE;
11562 break;
11563 }
11564 if (ddi_copyin(diag_action, &diag_unregister,
11565 sizeof (diag_unregister), ioctl_mode) != 0) {
11566 return (DDI_FAILURE);
11567 }
11568 status = mptsas_diag_unregister(mpt, &diag_unregister,
11569 return_code);
11570 break;
11571
11572 case MPTSAS_FW_DIAG_TYPE_QUERY:
11573 if (length < sizeof (diag_query)) {
11574 *return_code =
11575 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11576 status = DDI_FAILURE;
11577 break;
11578 }
11579 if (ddi_copyin(diag_action, &diag_query,
11580 sizeof (diag_query), ioctl_mode) != 0) {
11581 return (DDI_FAILURE);
11582 }
11583 status = mptsas_diag_query(mpt, &diag_query,
11584 return_code);
11585 if (status == DDI_SUCCESS) {
11586 if (ddi_copyout(&diag_query, diag_action,
11587 sizeof (diag_query), ioctl_mode) != 0) {
11588 return (DDI_FAILURE);
11589 }
11590 }
11591 break;
11592
11593 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
11594 if (ddi_copyin(diag_action, &diag_read_buffer,
11595 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
11596 return (DDI_FAILURE);
11597 }
11598 read_buf_len = sizeof (diag_read_buffer) -
11599 sizeof (diag_read_buffer.DataBuffer) +
11600 diag_read_buffer.BytesToRead;
11601 if (length < read_buf_len) {
11602 *return_code =
11603 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11604 status = DDI_FAILURE;
11605 break;
11606 }
11607 status = mptsas_diag_read_buffer(mpt,
11608 &diag_read_buffer, diag_action +
11609 sizeof (diag_read_buffer) - 4, return_code,
11610 ioctl_mode);
11611 if (status == DDI_SUCCESS) {
11612 if (ddi_copyout(&diag_read_buffer, diag_action,
11613 sizeof (diag_read_buffer) - 4, ioctl_mode)
11614 != 0) {
11615 return (DDI_FAILURE);
11616 }
11617 }
11618 break;
11619
11620 case MPTSAS_FW_DIAG_TYPE_RELEASE:
11621 if (length < sizeof (diag_release)) {
11622 *return_code =
11623 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11624 status = DDI_FAILURE;
11625 break;
11626 }
11627 if (ddi_copyin(diag_action, &diag_release,
11628 sizeof (diag_release), ioctl_mode) != 0) {
11629 return (DDI_FAILURE);
11630 }
11631 status = mptsas_diag_release(mpt, &diag_release,
11632 return_code);
11633 break;
11634
11635 default:
11636 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11637 status = DDI_FAILURE;
11638 break;
11639 }
11640
11641 if ((status == DDI_FAILURE) &&
11642 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
11643 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
11644 status = DDI_SUCCESS;
11645 }
11646
11647 return (status);
11648 }
11649
11650 static int
11651 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
11652 {
11653 int status;
11654 mptsas_diag_action_t driver_data;
11655
11656 ASSERT(mutex_owned(&mpt->m_mutex));
11657
11658 /*
11659 * Copy the user data to a driver data buffer.
11660 */
11661 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
11662 mode) == 0) {
11663 /*
11664 * Send diag action request if Action is valid
11665 */
11666 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
11667 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
11668 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
11669 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
11670 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
11671 status = mptsas_do_diag_action(mpt, driver_data.Action,
11672 (void *)(uintptr_t)driver_data.PtrDiagAction,
11673 driver_data.Length, &driver_data.ReturnCode,
11674 mode);
11675 if (status == DDI_SUCCESS) {
11676 if (ddi_copyout(&driver_data.ReturnCode,
11677 &user_data->ReturnCode,
11678 sizeof (user_data->ReturnCode), mode)
11679 != 0) {
11680 status = EFAULT;
11681 } else {
11682 status = 0;
11683 }
11684 } else {
11685 status = EIO;
11686 }
11687 } else {
11688 status = EINVAL;
11689 }
11690 } else {
11691 status = EFAULT;
11692 }
11693
11694 return (status);
11695 }
11696
11697 /*
11698 * This routine handles the "event query" ioctl.
11699 */
11700 static int
11701 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11702 int *rval)
11703 {
11704 int status;
11705 mptsas_event_query_t driverdata;
11706 uint8_t i;
11707
11708 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11709
11710 mutex_enter(&mpt->m_mutex);
11711 for (i = 0; i < 4; i++) {
11712 driverdata.Types[i] = mpt->m_event_mask[i];
11713 }
11714 mutex_exit(&mpt->m_mutex);
11715
11716 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11717 status = EFAULT;
11718 } else {
11719 *rval = MPTIOCTL_STATUS_GOOD;
11720 status = 0;
11721 }
11722
11723 return (status);
11724 }
11725
11726 /*
11727 * This routine handles the "event enable" ioctl.
11728 */
11729 static int
11730 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11731 int *rval)
11732 {
11733 int status;
11734 mptsas_event_enable_t driverdata;
11735 uint8_t i;
11736
11737 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11738 mutex_enter(&mpt->m_mutex);
11739 for (i = 0; i < 4; i++) {
11740 mpt->m_event_mask[i] = driverdata.Types[i];
11741 }
11742 mutex_exit(&mpt->m_mutex);
11743
11744 *rval = MPTIOCTL_STATUS_GOOD;
11745 status = 0;
11746 } else {
11747 status = EFAULT;
11748 }
11749 return (status);
11750 }
11751
11752 /*
11753 * This routine handles the "event report" ioctl.
11754 */
/*
 * This routine handles the "event report" ioctl.
 *
 * Copies the driver's cached event log (mpt->m_events) out to the user's
 * buffer.  The user supplies its buffer size in data->Size; if it is too
 * small, *rval is set to MPTIOCTL_STATUS_LEN_TOO_SHORT and nothing is
 * copied.  If it is larger than the event log, data->Size is rewritten to
 * the actual size so the caller knows how much was returned.  Returns 0
 * on success (including the too-short case) or EFAULT on a copy failure.
 */
static int
mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
    int *rval)
{
	int			status;
	mptsas_event_report_t	driverdata;

	/* Hold the adapter lock so m_events can't change mid-copy. */
	mutex_enter(&mpt->m_mutex);

	/* Only the Size field is needed from the user's structure. */
	if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
	    mode) == 0) {
		if (driverdata.Size >= sizeof (mpt->m_events)) {
			/* User buffer is big enough; copy the whole log. */
			if (ddi_copyout(mpt->m_events, data->Events,
			    sizeof (mpt->m_events), mode) != 0) {
				status = EFAULT;
			} else {
				/*
				 * If the user's buffer was oversized, write
				 * back the actual log size so the caller
				 * knows how much is valid.
				 */
				if (driverdata.Size > sizeof (mpt->m_events)) {
					driverdata.Size =
					    sizeof (mpt->m_events);
					if (ddi_copyout(&driverdata.Size,
					    &data->Size,
					    sizeof (driverdata.Size),
					    mode) != 0) {
						status = EFAULT;
					} else {
						*rval = MPTIOCTL_STATUS_GOOD;
						status = 0;
					}
				} else {
					*rval = MPTIOCTL_STATUS_GOOD;
					status = 0;
				}
			}
		} else {
			/* Buffer too small: report via *rval, not errno. */
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
			status = 0;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
11799
11800 static void
11801 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11802 {
11803 int *reg_data;
11804 uint_t reglen;
11805
11806 /*
11807 * Lookup the 'reg' property and extract the other data
11808 */
11809 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11810 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11811 DDI_PROP_SUCCESS) {
11812 /*
11813 * Extract the PCI data from the 'reg' property first DWORD.
11814 * The entry looks like the following:
11815 * First DWORD:
11816 * Bits 0 - 7 8-bit Register number
11817 * Bits 8 - 10 3-bit Function number
11818 * Bits 11 - 15 5-bit Device number
11819 * Bits 16 - 23 8-bit Bus number
11820 * Bits 24 - 25 2-bit Address Space type identifier
11821 *
11822 */
11823 adapter_data->PciInformation.u.bits.BusNumber =
11824 (reg_data[0] & 0x00FF0000) >> 16;
11825 adapter_data->PciInformation.u.bits.DeviceNumber =
11826 (reg_data[0] & 0x0000F800) >> 11;
11827 adapter_data->PciInformation.u.bits.FunctionNumber =
11828 (reg_data[0] & 0x00000700) >> 8;
11829 ddi_prop_free((void *)reg_data);
11830 } else {
11831 /*
11832 * If we can't determine the PCI data then we fill in FF's for
11833 * the data to indicate this.
11834 */
11835 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
11836 adapter_data->MpiPortNumber = 0xFFFFFFFF;
11837 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
11838 }
11839
11840 /*
11841 * Saved in the mpt->m_fwversion
11842 */
11843 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
11844 }
11845
11846 static void
11847 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11848 {
11849 char *driver_verstr = MPTSAS_MOD_STRING;
11850
11851 mptsas_lookup_pci_data(mpt, adapter_data);
11852 adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
11853 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
11854 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
11855 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
11856 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
11857 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
11858 adapter_data->BiosVersion = 0;
11859 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
11860 }
11861
11862 static void
11863 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
11864 {
11865 int *reg_data, i;
11866 uint_t reglen;
11867
11868 /*
11869 * Lookup the 'reg' property and extract the other data
11870 */
11871 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11872 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11873 DDI_PROP_SUCCESS) {
11874 /*
11875 * Extract the PCI data from the 'reg' property first DWORD.
11876 * The entry looks like the following:
11877 * First DWORD:
11878 * Bits 8 - 10 3-bit Function number
11879 * Bits 11 - 15 5-bit Device number
11880 * Bits 16 - 23 8-bit Bus number
11881 */
11882 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
11883 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
11884 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
11885 ddi_prop_free((void *)reg_data);
11886 } else {
11887 /*
11888 * If we can't determine the PCI info then we fill in FF's for
11889 * the data to indicate this.
11890 */
11891 pci_info->BusNumber = 0xFFFFFFFF;
11892 pci_info->DeviceNumber = 0xFF;
11893 pci_info->FunctionNumber = 0xFF;
11894 }
11895
11896 /*
11897 * Now get the interrupt vector and the pci header. The vector can
11898 * only be 0 right now. The header is the first 256 bytes of config
11899 * space.
11900 */
11901 pci_info->InterruptVector = 0;
11902 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
11903 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
11904 i);
11905 }
11906 }
11907
/*
 * Handle the MPTIOCTL_REG_ACCESS ioctl: read or write a 32-bit chip
 * register on behalf of a (privileged) user.  Only memory-mapped access
 * is supported; I/O-space access requests are rejected with EINVAL.
 *
 * NOTE(review): driverdata.RegOffset is applied as pointer arithmetic on
 * a uint32_t *, so it is an offset in 32-bit words, not bytes.  It is not
 * range-checked here — presumably acceptable because the ioctl path is
 * already restricted to privileged callers; confirm against callers.
 */
static int
mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
{
	int			status = 0;
	mptsas_reg_access_t	driverdata;

	mutex_enter(&mpt->m_mutex);
	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
		switch (driverdata.Command) {
		/*
		 * IO access is not supported.
		 */
		case REG_IO_READ:
		case REG_IO_WRITE:
			mptsas_log(mpt, CE_WARN, "IO access is not "
			    "supported.  Use memory access.");
			status = EINVAL;
			break;

		case REG_MEM_READ:
			/* 32-bit read at word offset RegOffset. */
			driverdata.RegData = ddi_get32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset);
			if (ddi_copyout(&driverdata.RegData,
			    &data->RegData,
			    sizeof (driverdata.RegData), mode) != 0) {
				mptsas_log(mpt, CE_WARN, "Register "
				    "Read Failed");
				status = EFAULT;
			}
			break;

		case REG_MEM_WRITE:
			/* 32-bit write at word offset RegOffset. */
			ddi_put32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset,
			    driverdata.RegData);
			break;

		default:
			status = EINVAL;
			break;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
11958
11959 static int
11960 led_control(mptsas_t *mpt, intptr_t data, int mode)
11961 {
11962 int ret = 0;
11963 mptsas_led_control_t lc;
11964 mptsas_target_t *ptgt;
11965
11966 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
11967 return (EFAULT);
11968 }
11969
11970 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
11971 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
11972 lc.Led < MPTSAS_LEDCTL_LED_IDENT ||
11973 lc.Led > MPTSAS_LEDCTL_LED_OK2RM) {
11974 return (EINVAL);
11975 }
11976
11977 /* Locate the target we're interrogating... */
11978 mutex_enter(&mpt->m_mutex);
11979 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11980 MPTSAS_HASH_FIRST);
11981 while (ptgt != NULL) {
11982 if (ptgt->m_enclosure == lc.Enclosure &&
11983 ptgt->m_slot_num == lc.Slot) {
11984 break;
11985 }
11986 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11987 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11988 }
11989 if (ptgt == NULL) {
11990 /* We could not find a target for that enclosure/slot. */
11991 mutex_exit(&mpt->m_mutex);
11992 return (ENOENT);
11993 }
11994
11995 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
11996 /* Update our internal LED state. */
11997 ptgt->m_led_status &= ~(1 << (lc.Led - 1));
11998 ptgt->m_led_status |= (!!lc.LedStatus) << (lc.Led - 1);
11999
12000 /* Flush it to the controller. */
12001 ret = mptsas_flush_led_status(mpt, ptgt);
12002 mutex_exit(&mpt->m_mutex);
12003 return (ret);
12004 }
12005
12006 /* Return our internal LED state. */
12007 lc.LedStatus = !!(ptgt->m_led_status & (1 << (lc.Led - 1)));
12008 mutex_exit(&mpt->m_mutex);
12009
12010 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12011 return (EFAULT);
12012 }
12013
12014 return (0);
12015 }
12016
12017 static int
12018 get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
12019 {
12020 int i;
12021 int count = 0;
12022 int ret = 0;
12023 mptsas_target_t *ptgt;
12024 mptsas_disk_info_t *di;
12025 STRUCT_DECL(mptsas_get_disk_info, gdi);
12026
12027 STRUCT_INIT(gdi, get_udatamodel());
12028
12029 if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
12030 mode) != 0) {
12031 return (EFAULT);
12032 }
12033
12034 restart:
12035 /* Find out how many targets there are. */
12036 mutex_enter(&mpt->m_mutex);
12037 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
12038 MPTSAS_HASH_FIRST);
12039 while (ptgt != NULL) {
12040 count++;
12041 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
12042 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
12043 }
12044 mutex_exit(&mpt->m_mutex);
12045
12046 /*
12047 * If we haven't been asked to copy out information on each target,
12048 * then just return the count.
12049 */
12050 STRUCT_FSET(gdi, DiskCount, count);
12051 if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
12052 goto copy_out;
12053
12054 /*
12055 * If we haven't been given a large enough buffer to copy out into,
12056 * let the caller know.
12057 */
12058 if (STRUCT_FGET(gdi, DiskInfoArraySize) <
12059 count * sizeof (mptsas_disk_info_t)) {
12060 ret = ENOSPC;
12061 goto copy_out;
12062 }
12063
12064 di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);
12065
12066 mutex_enter(&mpt->m_mutex);
12067 i = 0;
12068 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
12069 MPTSAS_HASH_FIRST);
12070 while (ptgt != NULL) {
12071 if (i >= count) {
12072 /*
12073 * The number of targets changed while we weren't
12074 * looking. Go again.
12075 */
12076 mutex_exit(&mpt->m_mutex);
12077 kmem_free(di, count * sizeof (mptsas_disk_info_t));
12078 goto restart;
12079 }
12080 di[i].Instance = mpt->m_instance;
12081 di[i].Enclosure = ptgt->m_enclosure;
12082 di[i].Slot = ptgt->m_slot_num;
12083 di[i].SasAddress = ptgt->m_sas_wwn;
12084
12085 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
12086 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
12087 i++;
12088 }
12089 mutex_exit(&mpt->m_mutex);
12090 STRUCT_FSET(gdi, DiskCount, i);
12091
12092 /* Copy out the disk information to the caller. */
12093 if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
12094 i * sizeof (mptsas_disk_info_t), mode) != 0) {
12095 ret = EFAULT;
12096 }
12097
12098 kmem_free(di, count * sizeof (mptsas_disk_info_t));
12099
12100 copy_out:
12101 if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
12102 mode) != 0) {
12103 ret = EFAULT;
12104 }
12105
12106 return (ret);
12107 }
12108
12109 static int
12110 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
12111 int *rval)
12112 {
12113 int status = 0;
12114 mptsas_t *mpt;
12115 mptsas_update_flash_t flashdata;
12116 mptsas_pass_thru_t passthru_data;
12117 mptsas_adapter_data_t adapter_data;
12118 mptsas_pci_info_t pci_info;
12119 int copylen;
12120
12121 int iport_flag = 0;
12122 dev_info_t *dip = NULL;
12123 mptsas_phymask_t phymask = 0;
12124 struct devctl_iocdata *dcp = NULL;
12125 uint32_t slotstatus = 0;
12126 char *addr = NULL;
12127 mptsas_target_t *ptgt = NULL;
12128
12129 *rval = MPTIOCTL_STATUS_GOOD;
12130 if (secpolicy_sys_config(credp, B_FALSE) != 0) {
12131 return (EPERM);
12132 }
12133
12134 mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
12135 if (mpt == NULL) {
12136 /*
12137 * Called from iport node, get the states
12138 */
12139 iport_flag = 1;
12140 dip = mptsas_get_dip_from_dev(dev, &phymask);
12141 if (dip == NULL) {
12142 return (ENXIO);
12143 }
12144 mpt = DIP2MPT(dip);
12145 }
12146 /* Make sure power level is D0 before accessing registers */
12147 mutex_enter(&mpt->m_mutex);
12148 if (mpt->m_options & MPTSAS_OPT_PM) {
12149 (void) pm_busy_component(mpt->m_dip, 0);
12150 if (mpt->m_power_level != PM_LEVEL_D0) {
12151 mutex_exit(&mpt->m_mutex);
12152 if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
12153 DDI_SUCCESS) {
12154 mptsas_log(mpt, CE_WARN,
12155 "mptsas%d: mptsas_ioctl: Raise power "
12156 "request failed.", mpt->m_instance);
12157 (void) pm_idle_component(mpt->m_dip, 0);
12158 return (ENXIO);
12159 }
12160 } else {
12161 mutex_exit(&mpt->m_mutex);
12162 }
12163 } else {
12164 mutex_exit(&mpt->m_mutex);
12165 }
12166
12167 if (iport_flag) {
12168 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
12169 if (status != 0) {
12170 goto out;
12171 }
12172 /*
12173 * The following code control the OK2RM LED, it doesn't affect
12174 * the ioctl return status.
12175 */
12176 if ((cmd == DEVCTL_DEVICE_ONLINE) ||
12177 (cmd == DEVCTL_DEVICE_OFFLINE)) {
12178 if (ndi_dc_allochdl((void *)data, &dcp) !=
12179 NDI_SUCCESS) {
12180 goto out;
12181 }
12182 addr = ndi_dc_getaddr(dcp);
12183 ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
12184 if (ptgt == NULL) {
12185 NDBG14(("mptsas_ioctl led control: tgt %s not "
12186 "found", addr));
12187 ndi_dc_freehdl(dcp);
12188 goto out;
12189 }
12190 mutex_enter(&mpt->m_mutex);
12191 if (cmd == DEVCTL_DEVICE_ONLINE) {
12192 ptgt->m_tgt_unconfigured = 0;
12193 } else if (cmd == DEVCTL_DEVICE_OFFLINE) {
12194 ptgt->m_tgt_unconfigured = 1;
12195 }
12196 if (cmd == DEVCTL_DEVICE_OFFLINE) {
12197 ptgt->m_led_status |=
12198 (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
12199 } else {
12200 ptgt->m_led_status &=
12201 ~(1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
12202 }
12203 if (mptsas_flush_led_status(mpt, ptgt) != DDI_SUCCESS) {
12204 NDBG14(("mptsas_ioctl: set LED for tgt %s "
12205 "failed %x", addr, slotstatus));
12206 }
12207 mutex_exit(&mpt->m_mutex);
12208 ndi_dc_freehdl(dcp);
12209 }
12210 goto out;
12211 }
12212 switch (cmd) {
12213 case MPTIOCTL_GET_DISK_INFO:
12214 status = get_disk_info(mpt, data, mode);
12215 break;
12216 case MPTIOCTL_LED_CONTROL:
12217 status = led_control(mpt, data, mode);
12218 break;
12219 case MPTIOCTL_UPDATE_FLASH:
12220 if (ddi_copyin((void *)data, &flashdata,
12221 sizeof (struct mptsas_update_flash), mode)) {
12222 status = EFAULT;
12223 break;
12224 }
12225
12226 mutex_enter(&mpt->m_mutex);
12227 if (mptsas_update_flash(mpt,
12228 (caddr_t)(long)flashdata.PtrBuffer,
12229 flashdata.ImageSize, flashdata.ImageType, mode)) {
12230 status = EFAULT;
12231 }
12232
12233 /*
12234 * Reset the chip to start using the new
12235 * firmware. Reset if failed also.
12236 */
12237 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12238 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
12239 status = EFAULT;
12240 }
12241 mutex_exit(&mpt->m_mutex);
12242 break;
12243 case MPTIOCTL_PASS_THRU:
12244 /*
12245 * The user has requested to pass through a command to
12246 * be executed by the MPT firmware. Call our routine
12247 * which does this. Only allow one passthru IOCTL at
12248 * one time. Other threads will block on
12249 * m_passthru_mutex, which is of adaptive variant.
12250 */
12251 if (ddi_copyin((void *)data, &passthru_data,
12252 sizeof (mptsas_pass_thru_t), mode)) {
12253 status = EFAULT;
12254 break;
12255 }
12256 mutex_enter(&mpt->m_passthru_mutex);
12257 mutex_enter(&mpt->m_mutex);
12258 status = mptsas_pass_thru(mpt, &passthru_data, mode);
12259 mutex_exit(&mpt->m_mutex);
12260 mutex_exit(&mpt->m_passthru_mutex);
12261
12262 break;
12263 case MPTIOCTL_GET_ADAPTER_DATA:
12264 /*
12265 * The user has requested to read adapter data. Call
12266 * our routine which does this.
12267 */
12268 bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
12269 if (ddi_copyin((void *)data, (void *)&adapter_data,
12270 sizeof (mptsas_adapter_data_t), mode)) {
12271 status = EFAULT;
12272 break;
12273 }
12274 if (adapter_data.StructureLength >=
12275 sizeof (mptsas_adapter_data_t)) {
12276 adapter_data.StructureLength = (uint32_t)
12277 sizeof (mptsas_adapter_data_t);
12278 copylen = sizeof (mptsas_adapter_data_t);
12279 mutex_enter(&mpt->m_mutex);
12280 mptsas_read_adapter_data(mpt, &adapter_data);
12281 mutex_exit(&mpt->m_mutex);
12282 } else {
12283 adapter_data.StructureLength = (uint32_t)
12284 sizeof (mptsas_adapter_data_t);
12285 copylen = sizeof (adapter_data.StructureLength);
12286 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
12287 }
12288 if (ddi_copyout((void *)(&adapter_data), (void *)data,
12289 copylen, mode) != 0) {
12290 status = EFAULT;
12291 }
12292 break;
12293 case MPTIOCTL_GET_PCI_INFO:
12294 /*
12295 * The user has requested to read pci info. Call
12296 * our routine which does this.
12297 */
12298 bzero(&pci_info, sizeof (mptsas_pci_info_t));
12299 mutex_enter(&mpt->m_mutex);
12300 mptsas_read_pci_info(mpt, &pci_info);
12301 mutex_exit(&mpt->m_mutex);
12302 if (ddi_copyout((void *)(&pci_info), (void *)data,
12303 sizeof (mptsas_pci_info_t), mode) != 0) {
12304 status = EFAULT;
12305 }
12306 break;
12307 case MPTIOCTL_RESET_ADAPTER:
12308 mutex_enter(&mpt->m_mutex);
12309 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12310 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
12311 mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
12312 "failed");
12313 status = EFAULT;
12314 }
12315 mutex_exit(&mpt->m_mutex);
12316 break;
12317 case MPTIOCTL_DIAG_ACTION:
12318 /*
12319 * The user has done a diag buffer action. Call our
12320 * routine which does this. Only allow one diag action
12321 * at one time.
12322 */
12323 mutex_enter(&mpt->m_mutex);
12324 if (mpt->m_diag_action_in_progress) {
12325 mutex_exit(&mpt->m_mutex);
12326 return (EBUSY);
12327 }
12328 mpt->m_diag_action_in_progress = 1;
12329 status = mptsas_diag_action(mpt,
12330 (mptsas_diag_action_t *)data, mode);
12331 mpt->m_diag_action_in_progress = 0;
12332 mutex_exit(&mpt->m_mutex);
12333 break;
12334 case MPTIOCTL_EVENT_QUERY:
12335 /*
12336 * The user has done an event query. Call our routine
12337 * which does this.
12338 */
12339 status = mptsas_event_query(mpt,
12340 (mptsas_event_query_t *)data, mode, rval);
12341 break;
12342 case MPTIOCTL_EVENT_ENABLE:
12343 /*
12344 * The user has done an event enable. Call our routine
12345 * which does this.
12346 */
12347 status = mptsas_event_enable(mpt,
12348 (mptsas_event_enable_t *)data, mode, rval);
12349 break;
12350 case MPTIOCTL_EVENT_REPORT:
12351 /*
12352 * The user has done an event report. Call our routine
12353 * which does this.
12354 */
12355 status = mptsas_event_report(mpt,
12356 (mptsas_event_report_t *)data, mode, rval);
12357 break;
12358 case MPTIOCTL_REG_ACCESS:
12359 /*
12360 * The user has requested register access. Call our
12361 * routine which does this.
12362 */
12363 status = mptsas_reg_access(mpt,
12364 (mptsas_reg_access_t *)data, mode);
12365 break;
12366 default:
12367 status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
12368 rval);
12369 break;
12370 }
12371
12372 out:
12373 if (mpt->m_options & MPTSAS_OPT_PM)
12374 (void) pm_idle_component(mpt->m_dip, 0);
12375 return (status);
12376 }
12377
/*
 * Reset and reinitialize the IOC, then resume I/O.
 *
 * Sequence (order matters): mark the adapter as in-reset, hold all target
 * throttles, disable interrupts, flush/abort all outstanding and queued
 * commands, reinitialize the chip, re-enable interrupts, restore
 * throttles, drain the done queue, and restart the HBA.  Returns
 * DDI_SUCCESS or DDI_FAILURE (chip reinit failed); on failure an FM
 * ereport is posted and the service is marked lost.
 *
 * Caller must hold mpt->m_mutex.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int		rval = DDI_SUCCESS;
	mptsas_target_t	*ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD
	 */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
		mutex_exit(&ptgt->m_tgt_intr_mutex);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles
	 */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		mutex_exit(&ptgt->m_tgt_intr_mutex);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		/* Chip reinit failed: report the fault via DDI FM. */
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
12467
12468 static int
12469 mptsas_init_chip(mptsas_t *mpt, int first_time)
12470 {
12471 ddi_dma_cookie_t cookie;
12472 uint32_t i;
12473 int rval;
12474
12475 /*
12476 * Check to see if the firmware image is valid
12477 */
12478 if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
12479 MPI2_DIAG_FLASH_BAD_SIG) {
12480 mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
12481 goto fail;
12482 }
12483
12484 /*
12485 * Reset the chip
12486 */
12487 rval = mptsas_ioc_reset(mpt, first_time);
12488 if (rval == MPTSAS_RESET_FAIL) {
12489 mptsas_log(mpt, CE_WARN, "hard reset failed!");
12490 goto fail;
12491 }
12492
12493 if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
12494 goto mur;
12495 }
12496 /*
12497 * Setup configuration space
12498 */
12499 if (mptsas_config_space_init(mpt) == FALSE) {
12500 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
12501 "failed!");
12502 goto fail;
12503 }
12504
12505 /*
12506 * IOC facts can change after a diag reset so all buffers that are
12507 * based on these numbers must be de-allocated and re-allocated. Get
12508 * new IOC facts each time chip is initialized.
12509 */
12510 if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
12511 mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
12512 goto fail;
12513 }
12514
12515 if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
12516 goto fail;
12517 }
12518 /*
12519 * Allocate request message frames, reply free queue, reply descriptor
12520 * post queue, and reply message frames using latest IOC facts.
12521 */
12522 if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
12523 mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
12524 goto fail;
12525 }
12526 if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
12527 mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
12528 goto fail;
12529 }
12530 if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
12531 mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
12532 goto fail;
12533 }
12534 if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
12535 mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
12536 goto fail;
12537 }
12538
12539 mur:
12540 /*
12541 * Re-Initialize ioc to operational state
12542 */
12543 if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
12544 mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
12545 goto fail;
12546 }
12547
12548 mptsas_alloc_reply_args(mpt);
12549
12550 /*
12551 * Initialize reply post index. Reply free index is initialized after
12552 * the next loop.
12553 */
12554 mpt->m_post_index = 0;
12555
12556 /*
12557 * Initialize the Reply Free Queue with the physical addresses of our
12558 * reply frames.
12559 */
12560 cookie.dmac_address = mpt->m_reply_frame_dma_addr;
12561 for (i = 0; i < mpt->m_max_replies; i++) {
12562 ddi_put32(mpt->m_acc_free_queue_hdl,
12563 &((uint32_t *)(void *)mpt->m_free_queue)[i],
12564 cookie.dmac_address);
12565 cookie.dmac_address += mpt->m_reply_frame_size;
12566 }
12567 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
12568 DDI_DMA_SYNC_FORDEV);
12569
12570 /*
12571 * Initialize the reply free index to one past the last frame on the
12572 * queue. This will signify that the queue is empty to start with.
12573 */
12574 mpt->m_free_index = i;
12575 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);
12576
12577 /*
12578 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
12579 */
12580 for (i = 0; i < mpt->m_post_queue_depth; i++) {
12581 ddi_put64(mpt->m_acc_post_queue_hdl,
12582 &((uint64_t *)(void *)mpt->m_post_queue)[i],
12583 0xFFFFFFFFFFFFFFFF);
12584 }
12585 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
12586 DDI_DMA_SYNC_FORDEV);
12587
12588 /*
12589 * Enable ports
12590 */
12591 if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
12592 mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
12593 goto fail;
12594 }
12595
12596 /*
12597 * enable events
12598 */
12599 if (mptsas_ioc_enable_event_notification(mpt)) {
12600 goto fail;
12601 }
12602
12603 /*
12604 * We need checks in attach and these.
12605 * chip_init is called in mult. places
12606 */
12607
12608 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
12609 DDI_SUCCESS) ||
12610 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
12611 DDI_SUCCESS) ||
12612 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
12613 DDI_SUCCESS) ||
12614 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
12615 DDI_SUCCESS) ||
12616 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
12617 DDI_SUCCESS)) {
12618 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
12619 goto fail;
12620 }
12621
12622 /* Check all acc handles */
12623 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
12624 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
12625 DDI_SUCCESS) ||
12626 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
12627 DDI_SUCCESS) ||
12628 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
12629 DDI_SUCCESS) ||
12630 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
12631 DDI_SUCCESS) ||
12632 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
12633 DDI_SUCCESS) ||
12634 (mptsas_check_acc_handle(mpt->m_config_handle) !=
12635 DDI_SUCCESS)) {
12636 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
12637 goto fail;
12638 }
12639
12640 return (DDI_SUCCESS);
12641
12642 fail:
12643 return (DDI_FAILURE);
12644 }
12645
12646 static int
12647 mptsas_get_pci_cap(mptsas_t *mpt)
12648 {
12649 ushort_t caps_ptr, cap, cap_count;
12650
12651 if (mpt->m_config_handle == NULL)
12652 return (FALSE);
12653 /*
12654 * Check if capabilities list is supported and if so,
12655 * get initial capabilities pointer and clear bits 0,1.
12656 */
12657 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
12658 & PCI_STAT_CAP) {
12659 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12660 PCI_CONF_CAP_PTR), 4);
12661 } else {
12662 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
12663 }
12664
12665 /*
12666 * Walk capabilities if supported.
12667 */
12668 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
12669
12670 /*
12671 * Check that we haven't exceeded the maximum number of
12672 * capabilities and that the pointer is in a valid range.
12673 */
12674 if (++cap_count > 48) {
12675 mptsas_log(mpt, CE_WARN,
12676 "too many device capabilities.\n");
12677 break;
12678 }
12679 if (caps_ptr < 64) {
12680 mptsas_log(mpt, CE_WARN,
12681 "capabilities pointer 0x%x out of range.\n",
12682 caps_ptr);
12683 break;
12684 }
12685
12686 /*
12687 * Get next capability and check that it is valid.
12688 * For now, we only support power management.
12689 */
12690 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
12691 switch (cap) {
12692 case PCI_CAP_ID_PM:
12693 mptsas_log(mpt, CE_NOTE,
12694 "?mptsas%d supports power management.\n",
12695 mpt->m_instance);
12696 mpt->m_options |= MPTSAS_OPT_PM;
12697
12698 /* Save PMCSR offset */
12699 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
12700 break;
12701 /*
12702 * The following capabilities are valid. Any others
12703 * will cause a message to be logged.
12704 */
12705 case PCI_CAP_ID_VPD:
12706 case PCI_CAP_ID_MSI:
12707 case PCI_CAP_ID_PCIX:
12708 case PCI_CAP_ID_PCI_E:
12709 case PCI_CAP_ID_MSI_X:
12710 break;
12711 default:
12712 mptsas_log(mpt, CE_NOTE,
12713 "?mptsas%d unrecognized capability "
12714 "0x%x.\n", mpt->m_instance, cap);
12715 break;
12716 }
12717
12718 /*
12719 * Get next capabilities pointer and clear bits 0,1.
12720 */
12721 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12722 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
12723 }
12724 return (TRUE);
12725 }
12726
12727 static int
12728 mptsas_init_pm(mptsas_t *mpt)
12729 {
12730 char pmc_name[16];
12731 char *pmc[] = {
12732 NULL,
12733 "0=Off (PCI D3 State)",
12734 "3=On (PCI D0 State)",
12735 NULL
12736 };
12737 uint16_t pmcsr_stat;
12738
12739 if (mptsas_get_pci_cap(mpt) == FALSE) {
12740 return (DDI_FAILURE);
12741 }
12742 /*
12743 * If PCI's capability does not support PM, then don't need
12744 * to registe the pm-components
12745 */
12746 if (!(mpt->m_options & MPTSAS_OPT_PM))
12747 return (DDI_SUCCESS);
12748 /*
12749 * If power management is supported by this chip, create
12750 * pm-components property for the power management framework
12751 */
12752 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
12753 pmc[0] = pmc_name;
12754 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
12755 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
12756 mutex_enter(&mpt->m_intr_mutex);
12757 mpt->m_options &= ~MPTSAS_OPT_PM;
12758 mutex_exit(&mpt->m_intr_mutex);
12759 mptsas_log(mpt, CE_WARN,
12760 "mptsas%d: pm-component property creation failed.",
12761 mpt->m_instance);
12762 return (DDI_FAILURE);
12763 }
12764
12765 /*
12766 * Power on device.
12767 */
12768 (void) pm_busy_component(mpt->m_dip, 0);
12769 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
12770 mpt->m_pmcsr_offset);
12771 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
12772 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
12773 mpt->m_instance);
12774 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
12775 PCI_PMCSR_D0);
12776 }
12777 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
12778 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
12779 return (DDI_FAILURE);
12780 }
12781 mutex_enter(&mpt->m_intr_mutex);
12782 mpt->m_power_level = PM_LEVEL_D0;
12783 mutex_exit(&mpt->m_intr_mutex);
12784 /*
12785 * Set pm idle delay.
12786 */
12787 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
12788 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
12789
12790 return (DDI_SUCCESS);
12791 }
12792
12793 static int
12794 mptsas_register_intrs(mptsas_t *mpt)
12795 {
12796 dev_info_t *dip;
12797 int intr_types;
12798
12799 dip = mpt->m_dip;
12800
12801 /* Get supported interrupt types */
12802 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
12803 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
12804 "failed\n");
12805 return (FALSE);
12806 }
12807
12808 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
12809
12810 /*
12811 * Try MSI, but fall back to FIXED
12812 */
12813 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
12814 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
12815 NDBG0(("Using MSI interrupt type"));
12816 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
12817 return (TRUE);
12818 }
12819 }
12820 if (intr_types & DDI_INTR_TYPE_FIXED) {
12821 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
12822 NDBG0(("Using FIXED interrupt type"));
12823 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
12824 return (TRUE);
12825 } else {
12826 NDBG0(("FIXED interrupt registration failed"));
12827 return (FALSE);
12828 }
12829 }
12830
12831 return (FALSE);
12832 }
12833
/*
 * mptsas_unregister_intrs:
 *
 * Thin wrapper undoing mptsas_register_intrs(): disables, removes and
 * frees all interrupt handles via mptsas_rem_intrs().
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
12839
12840 /*
12841 * mptsas_add_intrs:
12842 *
12843 * Register FIXED or MSI interrupts.
12844 */
12845 static int
12846 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
12847 {
12848 dev_info_t *dip = mpt->m_dip;
12849 int avail, actual, count = 0;
12850 int i, flag, ret;
12851
12852 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12853
12854 /* Get number of interrupts */
12855 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12856 if ((ret != DDI_SUCCESS) || (count <= 0)) {
12857 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12858 "ret %d count %d\n", ret, count);
12859
12860 return (DDI_FAILURE);
12861 }
12862
12863 /* Get number of available interrupts */
12864 ret = ddi_intr_get_navail(dip, intr_type, &avail);
12865 if ((ret != DDI_SUCCESS) || (avail == 0)) {
12866 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12867 "ret %d avail %d\n", ret, avail);
12868
12869 return (DDI_FAILURE);
12870 }
12871
12872 if (avail < count) {
12873 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
12874 "navail() returned %d", count, avail);
12875 }
12876
12877 /* Mpt only have one interrupt routine */
12878 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12879 count = 1;
12880 }
12881
12882 /* Allocate an array of interrupt handles */
12883 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12884 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12885
12886 flag = DDI_INTR_ALLOC_NORMAL;
12887
12888 /* call ddi_intr_alloc() */
12889 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12890 count, &actual, flag);
12891
12892 if ((ret != DDI_SUCCESS) || (actual == 0)) {
12893 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
12894 ret);
12895 kmem_free(mpt->m_htable, mpt->m_intr_size);
12896 return (DDI_FAILURE);
12897 }
12898
12899 /* use interrupt count returned or abort? */
12900 if (actual < count) {
12901 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
12902 count, actual);
12903 }
12904
12905 mpt->m_intr_cnt = actual;
12906
12907 /*
12908 * Get priority for first msi, assume remaining are all the same
12909 */
12910 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
12911 &mpt->m_intr_pri)) != DDI_SUCCESS) {
12912 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
12913
12914 /* Free already allocated intr */
12915 for (i = 0; i < actual; i++) {
12916 (void) ddi_intr_free(mpt->m_htable[i]);
12917 }
12918
12919 kmem_free(mpt->m_htable, mpt->m_intr_size);
12920 return (DDI_FAILURE);
12921 }
12922
12923 /* Test for high level mutex */
12924 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
12925 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
12926 "Hi level interrupt not supported\n");
12927
12928 /* Free already allocated intr */
12929 for (i = 0; i < actual; i++) {
12930 (void) ddi_intr_free(mpt->m_htable[i]);
12931 }
12932
12933 kmem_free(mpt->m_htable, mpt->m_intr_size);
12934 return (DDI_FAILURE);
12935 }
12936
12937 /* Call ddi_intr_add_handler() */
12938 for (i = 0; i < actual; i++) {
12939 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
12940 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
12941 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
12942 "failed %d\n", ret);
12943
12944 /* Free already allocated intr */
12945 for (i = 0; i < actual; i++) {
12946 (void) ddi_intr_free(mpt->m_htable[i]);
12947 }
12948
12949 kmem_free(mpt->m_htable, mpt->m_intr_size);
12950 return (DDI_FAILURE);
12951 }
12952 }
12953
12954 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
12955 != DDI_SUCCESS) {
12956 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
12957
12958 /* Free already allocated intr */
12959 for (i = 0; i < actual; i++) {
12960 (void) ddi_intr_free(mpt->m_htable[i]);
12961 }
12962
12963 kmem_free(mpt->m_htable, mpt->m_intr_size);
12964 return (DDI_FAILURE);
12965 }
12966
12967 /*
12968 * Enable interrupts
12969 */
12970 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12971 /* Call ddi_intr_block_enable() for MSI interrupts */
12972 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
12973 } else {
12974 /* Call ddi_intr_enable for MSI or FIXED interrupts */
12975 for (i = 0; i < mpt->m_intr_cnt; i++) {
12976 (void) ddi_intr_enable(mpt->m_htable[i]);
12977 }
12978 }
12979 return (DDI_SUCCESS);
12980 }
12981
12982 /*
12983 * mptsas_rem_intrs:
12984 *
12985 * Unregister FIXED or MSI interrupts
12986 */
12987 static void
12988 mptsas_rem_intrs(mptsas_t *mpt)
12989 {
12990 int i;
12991
12992 NDBG6(("mptsas_rem_intrs"));
12993
12994 /* Disable all interrupts */
12995 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12996 /* Call ddi_intr_block_disable() */
12997 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
12998 } else {
12999 for (i = 0; i < mpt->m_intr_cnt; i++) {
13000 (void) ddi_intr_disable(mpt->m_htable[i]);
13001 }
13002 }
13003
13004 /* Call ddi_intr_remove_handler() */
13005 for (i = 0; i < mpt->m_intr_cnt; i++) {
13006 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
13007 (void) ddi_intr_free(mpt->m_htable[i]);
13008 }
13009
13010 kmem_free(mpt->m_htable, mpt->m_intr_size);
13011 }
13012
13013 /*
13014 * The IO fault service error handling callback function
13015 */
13016 /*ARGSUSED*/
13017 static int
13018 mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
13019 {
13020 /*
13021 * as the driver can always deal with an error in any dma or
13022 * access handle, we can just return the fme_status value.
13023 */
13024 pci_ereport_post(dip, err, NULL);
13025 return (err->fme_status);
13026 }
13027
13028 /*
13029 * mptsas_fm_init - initialize fma capabilities and register with IO
13030 * fault services.
13031 */
13032 static void
13033 mptsas_fm_init(mptsas_t *mpt)
13034 {
13035 /*
13036 * Need to change iblock to priority for new MSI intr
13037 */
13038 ddi_iblock_cookie_t fm_ibc;
13039
13040 /* Only register with IO Fault Services if we have some capability */
13041 if (mpt->m_fm_capabilities) {
13042 /* Adjust access and dma attributes for FMA */
13043 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
13044 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13045 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13046
13047 /*
13048 * Register capabilities with IO Fault Services.
13049 * mpt->m_fm_capabilities will be updated to indicate
13050 * capabilities actually supported (not requested.)
13051 */
13052 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
13053
13054 /*
13055 * Initialize pci ereport capabilities if ereport
13056 * capable (should always be.)
13057 */
13058 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13059 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13060 pci_ereport_setup(mpt->m_dip);
13061 }
13062
13063 /*
13064 * Register error callback if error callback capable.
13065 */
13066 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13067 ddi_fm_handler_register(mpt->m_dip,
13068 mptsas_fm_error_cb, (void *) mpt);
13069 }
13070 }
13071 }
13072
13073 /*
13074 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
13075 * fault services.
13076 *
13077 */
13078 static void
13079 mptsas_fm_fini(mptsas_t *mpt)
13080 {
13081 /* Only unregister FMA capabilities if registered */
13082 if (mpt->m_fm_capabilities) {
13083
13084 /*
13085 * Un-register error callback if error callback capable.
13086 */
13087
13088 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13089 ddi_fm_handler_unregister(mpt->m_dip);
13090 }
13091
13092 /*
13093 * Release any resources allocated by pci_ereport_setup()
13094 */
13095
13096 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13097 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13098 pci_ereport_teardown(mpt->m_dip);
13099 }
13100
13101 /* Unregister from IO Fault Services */
13102 ddi_fm_fini(mpt->m_dip);
13103
13104 /* Adjust access and dma attributes for FMA */
13105 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
13106 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13107 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13108
13109 }
13110 }
13111
13112 int
13113 mptsas_check_acc_handle(ddi_acc_handle_t handle)
13114 {
13115 ddi_fm_error_t de;
13116
13117 if (handle == NULL)
13118 return (DDI_FAILURE);
13119 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
13120 return (de.fme_status);
13121 }
13122
13123 int
13124 mptsas_check_dma_handle(ddi_dma_handle_t handle)
13125 {
13126 ddi_fm_error_t de;
13127
13128 if (handle == NULL)
13129 return (DDI_FAILURE);
13130 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
13131 return (de.fme_status);
13132 }
13133
13134 void
13135 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
13136 {
13137 uint64_t ena;
13138 char buf[FM_MAX_CLASS];
13139
13140 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
13141 ena = fm_ena_generate(0, FM_ENA_FMT1);
13142 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
13143 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
13144 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
13145 }
13146 }
13147
13148 static int
13149 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13150 uint16_t *dev_handle, mptsas_target_t **pptgt)
13151 {
13152 int rval;
13153 uint32_t dev_info;
13154 uint64_t sas_wwn;
13155 mptsas_phymask_t phymask;
13156 uint8_t physport, phynum, config, disk;
13157 mptsas_slots_t *slots = mpt->m_active;
13158 uint64_t devicename;
13159 uint16_t pdev_hdl;
13160 mptsas_target_t *tmp_tgt = NULL;
13161 uint16_t bay_num, enclosure;
13162
13163 ASSERT(*pptgt == NULL);
13164
13165 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13166 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13167 &bay_num, &enclosure);
13168 if (rval != DDI_SUCCESS) {
13169 rval = DEV_INFO_FAIL_PAGE0;
13170 return (rval);
13171 }
13172
13173 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
13174 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13175 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
13176 rval = DEV_INFO_WRONG_DEVICE_TYPE;
13177 return (rval);
13178 }
13179
13180 /*
13181 * Check if the dev handle is for a Phys Disk. If so, set return value
13182 * and exit. Don't add Phys Disks to hash.
13183 */
13184 for (config = 0; config < slots->m_num_raid_configs; config++) {
13185 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
13186 if (*dev_handle == slots->m_raidconfig[config].
13187 m_physdisk_devhdl[disk]) {
13188 rval = DEV_INFO_PHYS_DISK;
13189 return (rval);
13190 }
13191 }
13192 }
13193
13194 /*
13195 * Get SATA Device Name from SAS device page0 for
13196 * sata device, if device name doesn't exist, set m_sas_wwn to
13197 * 0 for direct attached SATA. For the device behind the expander
13198 * we still can use STP address assigned by expander.
13199 */
13200 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13201 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13202 mutex_exit(&mpt->m_mutex);
13203 /* alloc a tmp_tgt to send the cmd */
13204 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
13205 KM_SLEEP);
13206 tmp_tgt->m_devhdl = *dev_handle;
13207 tmp_tgt->m_deviceinfo = dev_info;
13208 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
13209 tmp_tgt->m_qfull_retry_interval =
13210 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
13211 tmp_tgt->m_t_throttle = MAX_THROTTLE;
13212 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13213 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
13214 mutex_enter(&mpt->m_mutex);
13215 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13216 sas_wwn = devicename;
13217 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13218 sas_wwn = 0;
13219 }
13220 }
13221
13222 phymask = mptsas_physport_to_phymask(mpt, physport);
13223 *pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
13224 dev_info, phymask, phynum, mpt);
13225 if (*pptgt == NULL) {
13226 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
13227 "structure!");
13228 rval = DEV_INFO_FAIL_ALLOC;
13229 return (rval);
13230 }
13231 (*pptgt)->m_enclosure = enclosure;
13232 (*pptgt)->m_slot_num = bay_num;
13233 return (DEV_INFO_SUCCESS);
13234 }
13235
/*
 * mptsas_get_sata_guid:
 *
 * Retrieve the world wide name of a SATA device by issuing an INQUIRY
 * for VPD page 0x83 (device identification) and extracting the first
 * designator when it carries an NAA-format (0x5x) identifier.
 *
 * Returns the GUID in host byte order, or 0 when the device has no NAA
 * GUID, the inquiry fails, or the data never becomes ready after three
 * retries (one second apart).
 */
uint64_t
mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
{
	uint64_t	sata_guid = 0, *pwwn = NULL;
	int		target = ptgt->m_devhdl;
	uchar_t		*inq83 = NULL;
	int		inq83_len = 0xFF;
	uchar_t		*dblk = NULL;
	int		inq83_retry = 3;
	int		rval = DDI_FAILURE;

	inq83	= kmem_zalloc(inq83_len, KM_SLEEP);

inq83_retry:
	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
	    inq83_len, NULL, 1);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
		    "0x83 for target:%x, lun:%x failed!", target, lun);
		goto out;
	}
	/* According to SAT2, the first descriptor is logic unit name */
	dblk = &inq83[4];
	/* Association field (bits 5:4 of byte 1) must be 0 = LUN. */
	if ((dblk[1] & 0x30) != 0) {
		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
		goto out;
	}
	pwwn = (uint64_t *)(void *)(&dblk[4]);
	/* High nibble 0x5 marks an NAA identifier; take it big-endian. */
	if ((dblk[4] & 0xf0) == 0x50) {
		sata_guid = BE_64(*pwwn);
		goto out;
	} else if (dblk[4] == 'A') {
		/* ASCII 'A' -- presumably an ATA vendor-id string, */
		/* not an NAA GUID; give up.  TODO confirm vs SAT2. */
		NDBG20(("SATA drive has no NAA format GUID."));
		goto out;
	} else {
		/* The data is not ready, wait and retry */
		inq83_retry--;
		if (inq83_retry <= 0) {
			goto out;
		}
		NDBG20(("The GUID is not ready, retry..."));
		delay(1 * drv_usectohz(1000000));
		goto inq83_retry;
	}
out:
	kmem_free(inq83, inq83_len);
	return (sata_guid);
}
13284
/*
 * mptsas_inquiry:
 *
 * Issue an INQUIRY (optionally an EVPD page when evpd is nonzero) to
 * the given target/lun and copy the response into buf.  On success the
 * number of bytes actually returned is stored through reallen (when
 * non-NULL).  Returns DDI_SUCCESS/DDI_FAILURE.
 *
 * len must fit in the 16-bit allocation-length field of the CDB.
 */
static int
mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
    unsigned char *buf, int len, int *reallen, uchar_t evpd)
{
	uchar_t			cdb[CDB_GROUP0];
	struct scsi_address	ap;
	struct buf		*data_bp = NULL;
	int			resid = 0;
	int			ret = DDI_FAILURE;

	ASSERT(len <= 0xffff);

	/* Fake up an address; the real target comes in via ptgt. */
	ap.a_target = MPTSAS_INVALID_DEVHDL;
	ap.a_lun = (uchar_t)(lun);
	ap.a_hba_tran = mpt->m_tran;

	data_bp = scsi_alloc_consistent_buf(&ap,
	    (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
	if (data_bp == NULL) {
		return (ret);
	}
	/* Build the 6-byte INQUIRY CDB; bytes 3-4 are the big-endian len. */
	bzero(cdb, CDB_GROUP0);
	cdb[0] = SCMD_INQUIRY;
	cdb[1] = evpd;
	cdb[2] = page;
	cdb[3] = (len & 0xff00) >> 8;
	cdb[4] = (len & 0x00ff);
	cdb[5] = 0;

	ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
	    &resid);
	if (ret == DDI_SUCCESS) {
		if (reallen) {
			*reallen = len - resid;
		}
		bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
	}
	if (data_bp) {
		scsi_free_consistent_buf(data_bp);
	}
	return (ret);
}
13327
/*
 * mptsas_send_scsi_cmd:
 *
 * Build and synchronously execute (via scsi_poll) a SCSI command for
 * the given target, returning the packet residual through resid when
 * requested.  Returns DDI_SUCCESS only when the command completed
 * without a check condition.
 */
static int
mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
    mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
    int *resid)
{
	struct scsi_pkt		*pktp = NULL;
	scsi_hba_tran_t		*tran_clone = NULL;
	mptsas_tgt_private_t	*tgt_private = NULL;
	int			ret = DDI_FAILURE;

	/*
	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
	 * to simulate the cmds from sd
	 */
	tran_clone = kmem_alloc(
	    sizeof (scsi_hba_tran_t), KM_SLEEP);
	if (tran_clone == NULL) {
		goto out;
	}
	bcopy((caddr_t)mpt->m_tran,
	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
	tgt_private = kmem_alloc(
	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
	if (tgt_private == NULL) {
		goto out;
	}
	tgt_private->t_lun = ap->a_lun;
	tgt_private->t_private = ptgt;
	tran_clone->tran_tgt_private = tgt_private;
	ap->a_hba_tran = tran_clone;

	/* Allocate a packet with room for auto request sense. */
	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
	    data_bp, cdblen, sizeof (struct scsi_arq_status),
	    0, PKT_CONSISTENT, NULL, NULL);
	if (pktp == NULL) {
		goto out;
	}
	bcopy(cdb, pktp->pkt_cdbp, cdblen);
	pktp->pkt_flags = FLAG_NOPARITY;
	/* scsi_poll() runs the command synchronously. */
	if (scsi_poll(pktp) < 0) {
		goto out;
	}
	/* A check condition counts as failure. */
	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
		goto out;
	}
	if (resid != NULL) {
		*resid = pktp->pkt_resid;
	}

	ret = DDI_SUCCESS;
out:
	/* Common cleanup for every exit path. */
	if (pktp) {
		scsi_destroy_pkt(pktp);
	}
	if (tran_clone) {
		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
	}
	if (tgt_private) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
	}
	return (ret);
}
13391 static int
13392 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
13393 {
13394 char *cp = NULL;
13395 char *ptr = NULL;
13396 size_t s = 0;
13397 char *wwid_str = NULL;
13398 char *lun_str = NULL;
13399 long lunnum;
13400 long phyid = -1;
13401 int rc = DDI_FAILURE;
13402
13403 ptr = name;
13404 ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
13405 ptr++;
13406 if ((cp = strchr(ptr, ',')) == NULL) {
13407 return (DDI_FAILURE);
13408 }
13409
13410 wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13411 s = (uintptr_t)cp - (uintptr_t)ptr;
13412
13413 bcopy(ptr, wwid_str, s);
13414 wwid_str[s] = '\0';
13415
13416 ptr = ++cp;
13417
13418 if ((cp = strchr(ptr, '\0')) == NULL) {
13419 goto out;
13420 }
13421 lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13422 s = (uintptr_t)cp - (uintptr_t)ptr;
13423
13424 bcopy(ptr, lun_str, s);
13425 lun_str[s] = '\0';
13426
13427 if (name[0] == 'p') {
13428 rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
13429 } else {
13430 rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
13431 }
13432 if (rc != DDI_SUCCESS)
13433 goto out;
13434
13435 if (phyid != -1) {
13436 ASSERT(phyid < MPTSAS_MAX_PHYS);
13437 *phy = (uint8_t)phyid;
13438 }
13439 rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
13440 if (rc != 0)
13441 goto out;
13442
13443 *lun = (int)lunnum;
13444 rc = DDI_SUCCESS;
13445 out:
13446 if (wwid_str)
13447 kmem_free(wwid_str, SCSI_MAXNAMELEN);
13448 if (lun_str)
13449 kmem_free(lun_str, SCSI_MAXNAMELEN);
13450
13451 return (rc);
13452 }
13453
13454 /*
13455 * mptsas_parse_smp_name() is to parse sas wwn string
13456 * which format is "wWWN"
13457 */
13458 static int
13459 mptsas_parse_smp_name(char *name, uint64_t *wwn)
13460 {
13461 char *ptr = name;
13462
13463 if (*ptr != 'w') {
13464 return (DDI_FAILURE);
13465 }
13466
13467 ptr++;
13468 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
13469 return (DDI_FAILURE);
13470 }
13471 return (DDI_SUCCESS);
13472 }
13473
/*
 * mptsas_bus_config:
 *
 * Nexus bus_config(9E) entry point for the iport.  Handles
 * BUS_CONFIG_ONE by parsing the child unit-address (SMP, wWWN or pPHY
 * form) and configuring just that device, and BUS_CONFIG_DRIVER /
 * BUS_CONFIG_ALL by enumerating everything on the port.  The vHCI and
 * iport nodes are held across the operation via ndi_devi_enter().
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int		ret = NDI_FAILURE;
	int		circ = 0;
	int		circ1 = 0;
	mptsas_t	*mpt;
	char		*ptr = NULL;
	char		*devnm = NULL;
	uint64_t	wwid = 0;
	uint8_t		phy = 0xFF;
	int		lun = 0;
	uint_t		mflags = flag;
	int		bconfig = TRUE;

	/* Only iport nodes (which have a unit address) are configurable. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				/* Append ",0" and re-locate the address. */
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	/* Let the framework finish the config (attach, mount, etc.). */
	if ((ret == NDI_SUCCESS) && bconfig) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
13591
13592 static int
13593 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
13594 mptsas_target_t *ptgt)
13595 {
13596 int rval = DDI_FAILURE;
13597 struct scsi_inquiry *sd_inq = NULL;
13598 mptsas_t *mpt = DIP2MPT(pdip);
13599
13600 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13601
13602 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
13603 SUN_INQSIZE, 0, (uchar_t)0);
13604
13605 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13606 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
13607 } else {
13608 rval = DDI_FAILURE;
13609 }
13610
13611 kmem_free(sd_inq, SUN_INQSIZE);
13612 return (rval);
13613 }
13614
13615 static int
13616 mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
13617 dev_info_t **lundip)
13618 {
13619 int rval;
13620 mptsas_t *mpt = DIP2MPT(pdip);
13621 int phymask;
13622 mptsas_target_t *ptgt = NULL;
13623
13624 /*
13625 * Get the physical port associated to the iport
13626 */
13627 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13628 "phymask", 0);
13629
13630 ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
13631 if (ptgt == NULL) {
13632 /*
13633 * didn't match any device by searching
13634 */
13635 return (DDI_FAILURE);
13636 }
13637 /*
13638 * If the LUN already exists and the status is online,
13639 * we just return the pointer to dev_info_t directly.
13640 * For the mdi_pathinfo node, we'll handle it in
13641 * mptsas_create_virt_lun()
13642 * TODO should be also in mptsas_handle_dr
13643 */
13644
13645 *lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
13646 if (*lundip != NULL) {
13647 /*
13648 * TODO Another senario is, we hotplug the same disk
13649 * on the same slot, the devhdl changed, is this
13650 * possible?
13651 * tgt_private->t_private != ptgt
13652 */
13653 if (sasaddr != ptgt->m_sas_wwn) {
13654 /*
13655 * The device has changed although the devhdl is the
13656 * same (Enclosure mapping mode, change drive on the
13657 * same slot)
13658 */
13659 return (DDI_FAILURE);
13660 }
13661 return (DDI_SUCCESS);
13662 }
13663
13664 if (phymask == 0) {
13665 /*
13666 * Configure IR volume
13667 */
13668 rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
13669 return (rval);
13670 }
13671 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13672
13673 return (rval);
13674 }
13675
13676 static int
13677 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
13678 dev_info_t **lundip)
13679 {
13680 int rval;
13681 mptsas_t *mpt = DIP2MPT(pdip);
13682 int phymask;
13683 mptsas_target_t *ptgt = NULL;
13684
13685 /*
13686 * Get the physical port associated to the iport
13687 */
13688 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13689 "phymask", 0);
13690
13691 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
13692 if (ptgt == NULL) {
13693 /*
13694 * didn't match any device by searching
13695 */
13696 return (DDI_FAILURE);
13697 }
13698
13699 /*
13700 * If the LUN already exists and the status is online,
13701 * we just return the pointer to dev_info_t directly.
13702 * For the mdi_pathinfo node, we'll handle it in
13703 * mptsas_create_virt_lun().
13704 */
13705
13706 *lundip = mptsas_find_child_phy(pdip, phy);
13707 if (*lundip != NULL) {
13708 return (DDI_SUCCESS);
13709 }
13710
13711 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13712
13713 return (rval);
13714 }
13715
13716 static int
13717 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
13718 uint8_t *lun_addr_type)
13719 {
13720 uint32_t lun_idx = 0;
13721
13722 ASSERT(lun_num != NULL);
13723 ASSERT(lun_addr_type != NULL);
13724
13725 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13726 /* determine report luns addressing type */
13727 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
13728 /*
13729 * Vendors in the field have been found to be concatenating
13730 * bus/target/lun to equal the complete lun value instead
13731 * of switching to flat space addressing
13732 */
13733 /* 00b - peripheral device addressing method */
13734 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
13735 /* FALLTHRU */
13736 /* 10b - logical unit addressing method */
13737 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
13738 /* FALLTHRU */
13739 /* 01b - flat space addressing method */
13740 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
13741 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
13742 *lun_addr_type = (buf[lun_idx] &
13743 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
13744 *lun_num = (buf[lun_idx] & 0x3F) << 8;
13745 *lun_num |= buf[lun_idx + 1];
13746 return (DDI_SUCCESS);
13747 default:
13748 return (DDI_FAILURE);
13749 }
13750 }
13751
13752 static int
13753 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
13754 {
13755 struct buf *repluns_bp = NULL;
13756 struct scsi_address ap;
13757 uchar_t cdb[CDB_GROUP5];
13758 int ret = DDI_FAILURE;
13759 int retry = 0;
13760 int lun_list_len = 0;
13761 uint16_t lun_num = 0;
13762 uint8_t lun_addr_type = 0;
13763 uint32_t lun_cnt = 0;
13764 uint32_t lun_total = 0;
13765 dev_info_t *cdip = NULL;
13766 uint16_t *saved_repluns = NULL;
13767 char *buffer = NULL;
13768 int buf_len = 128;
13769 mptsas_t *mpt = DIP2MPT(pdip);
13770 uint64_t sas_wwn = 0;
13771 uint8_t phy = 0xFF;
13772 uint32_t dev_info = 0;
13773
13774 mutex_enter(&mpt->m_mutex);
13775 sas_wwn = ptgt->m_sas_wwn;
13776 phy = ptgt->m_phynum;
13777 dev_info = ptgt->m_deviceinfo;
13778 mutex_exit(&mpt->m_mutex);
13779
13780 if (sas_wwn == 0) {
13781 /*
13782 * It's a SATA without Device Name
13783 * So don't try multi-LUNs
13784 */
13785 if (mptsas_find_child_phy(pdip, phy)) {
13786 return (DDI_SUCCESS);
13787 } else {
13788 /*
13789 * need configure and create node
13790 */
13791 return (DDI_FAILURE);
13792 }
13793 }
13794
13795 /*
13796 * WWN (SAS address or Device Name exist)
13797 */
13798 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13799 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13800 /*
13801 * SATA device with Device Name
13802 * So don't try multi-LUNs
13803 */
13804 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
13805 return (DDI_SUCCESS);
13806 } else {
13807 return (DDI_FAILURE);
13808 }
13809 }
13810
13811 do {
13812 ap.a_target = MPTSAS_INVALID_DEVHDL;
13813 ap.a_lun = 0;
13814 ap.a_hba_tran = mpt->m_tran;
13815 repluns_bp = scsi_alloc_consistent_buf(&ap,
13816 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
13817 if (repluns_bp == NULL) {
13818 retry++;
13819 continue;
13820 }
13821 bzero(cdb, CDB_GROUP5);
13822 cdb[0] = SCMD_REPORT_LUNS;
13823 cdb[6] = (buf_len & 0xff000000) >> 24;
13824 cdb[7] = (buf_len & 0x00ff0000) >> 16;
13825 cdb[8] = (buf_len & 0x0000ff00) >> 8;
13826 cdb[9] = (buf_len & 0x000000ff);
13827
13828 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
13829 repluns_bp, NULL);
13830 if (ret != DDI_SUCCESS) {
13831 scsi_free_consistent_buf(repluns_bp);
13832 retry++;
13833 continue;
13834 }
13835 lun_list_len = BE_32(*(int *)((void *)(
13836 repluns_bp->b_un.b_addr)));
13837 if (buf_len >= lun_list_len + 8) {
13838 ret = DDI_SUCCESS;
13839 break;
13840 }
13841 scsi_free_consistent_buf(repluns_bp);
13842 buf_len = lun_list_len + 8;
13843
13844 } while (retry < 3);
13845
13846 if (ret != DDI_SUCCESS)
13847 return (ret);
13848 buffer = (char *)repluns_bp->b_un.b_addr;
13849 /*
13850 * find out the number of luns returned by the SCSI ReportLun call
13851 * and allocate buffer space
13852 */
13853 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13854 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
13855 if (saved_repluns == NULL) {
13856 scsi_free_consistent_buf(repluns_bp);
13857 return (DDI_FAILURE);
13858 }
13859 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
13860 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
13861 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
13862 continue;
13863 }
13864 saved_repluns[lun_cnt] = lun_num;
13865 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
13866 ret = DDI_SUCCESS;
13867 else
13868 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
13869 ptgt);
13870 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
13871 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
13872 MPTSAS_DEV_GONE);
13873 }
13874 }
13875 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
13876 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
13877 scsi_free_consistent_buf(repluns_bp);
13878 return (DDI_SUCCESS);
13879 }
13880
13881 static int
13882 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
13883 {
13884 int rval = DDI_FAILURE;
13885 struct scsi_inquiry *sd_inq = NULL;
13886 mptsas_t *mpt = DIP2MPT(pdip);
13887 mptsas_target_t *ptgt = NULL;
13888
13889 mutex_enter(&mpt->m_mutex);
13890 ptgt = mptsas_search_by_devhdl(&mpt->m_active->m_tgttbl, target);
13891 mutex_exit(&mpt->m_mutex);
13892 if (ptgt == NULL) {
13893 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
13894 "not found.", target);
13895 return (rval);
13896 }
13897
13898 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13899 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
13900 SUN_INQSIZE, 0, (uchar_t)0);
13901
13902 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13903 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
13904 0);
13905 } else {
13906 rval = DDI_FAILURE;
13907 }
13908
13909 kmem_free(sd_inq, SUN_INQSIZE);
13910 return (rval);
13911 }
13912
13913 /*
13914 * configure all RAID volumes for virtual iport
13915 */
13916 static void
13917 mptsas_config_all_viport(dev_info_t *pdip)
13918 {
13919 mptsas_t *mpt = DIP2MPT(pdip);
13920 int config, vol;
13921 int target;
13922 dev_info_t *lundip = NULL;
13923 mptsas_slots_t *slots = mpt->m_active;
13924
13925 /*
13926 * Get latest RAID info and search for any Volume DevHandles. If any
13927 * are found, configure the volume.
13928 */
13929 mutex_enter(&mpt->m_mutex);
13930 for (config = 0; config < slots->m_num_raid_configs; config++) {
13931 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
13932 if (slots->m_raidconfig[config].m_raidvol[vol].m_israid
13933 == 1) {
13934 target = slots->m_raidconfig[config].
13935 m_raidvol[vol].m_raidhandle;
13936 mutex_exit(&mpt->m_mutex);
13937 (void) mptsas_config_raid(pdip, target,
13938 &lundip);
13939 mutex_enter(&mpt->m_mutex);
13940 }
13941 }
13942 }
13943 mutex_exit(&mpt->m_mutex);
13944 }
13945
/*
 * Offline any child devinfo node or mdi path that belongs to target
 * 'ptgt' (matched by SAS WWN) but whose LUN number no longer appears in
 * the REPORT LUNS response ('repluns', 'lun_cnt' entries).
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t	*child = NULL, *savechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	uint64_t	sas_wwn, wwid;
	uint8_t		phy;
	int		lun;
	int		i;
	int		find;
	char		*addr;
	char		*nodename;
	mptsas_t	*mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_sas_wwn;
	mutex_exit(&mpt->m_mutex);

	/*
	 * Pass 1: plain child devinfo nodes.  The sibling pointer is
	 * fetched before any offline, so removing 'savechild' cannot
	 * break the walk.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		savechild = child;
		child = ddi_get_next_sibling(child);

		/* SMP (expander) nodes are not LUNs; skip them. */
		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		/* Only nodes of this target are candidates for offline. */
		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The LUN is no longer reported by the target;
			 * offline and remove its node.
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/*
	 * Pass 2: mdi (scsi_vhci) client paths, same save-then-advance
	 * pattern as above.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The LUN is no longer reported by the target;
			 * offline and remove its path.
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
14042
/*
 * Rebuild the expander (SMP) and target hash tables by walking the
 * IOC's SAS configuration pages with the GET_NEXT_HANDLE forms.
 * Progress state (m_smp_devhdl, m_dev_handle and the m_done_traverse_*
 * flags) lives in the softstate, so an interrupted walk resumes where
 * it left off on the next call.
 * NOTE(review): callers visible here (mptsas_config_all) hold m_mutex
 * across this function -- confirm that is a requirement of the callees.
 */
void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t	page_address;
	int		rval = 0;
	uint16_t	dev_handle;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	/* Walk expander page 0 until the last handle has been seen. */
	dev_handle = mpt->m_smp_devhdl;
	for (; mpt->m_done_traverse_smp == 0; ) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		/* Record progress before inserting into the SMP table. */
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Do loop to get sas device page 0 by GetNextHandle till
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	for (; mpt->m_done_traverse_dev == 0; ) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		/* Stop on page-read or allocation failure. */
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}

		mpt->m_dev_handle = dev_handle;
	}

}
14096
14097 void
14098 mptsas_invalid_hashtab(mptsas_hash_table_t *hashtab)
14099 {
14100 mptsas_hash_data_t *data;
14101 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
14102 while (data != NULL) {
14103 data->devhdl = MPTSAS_INVALID_DEVHDL;
14104 data->device_info = 0;
14105 /*
14106 * For tgttbl, clear dr_flag.
14107 */
14108 data->dr_flag = MPTSAS_DR_INACTIVE;
14109 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
14110 }
14111 }
14112
void
mptsas_update_driver_data(struct mptsas *mpt)
{
	/*
	 * Re-sync driver state after a hard reset:
	 * 1. update port/phymask mapping table mpt->m_phy_info
	 * 2. invalidate all the entries in the hash tables
	 *    (m_devhdl = 0xffff and m_deviceinfo = 0)
	 * 3. re-walk the device/expander config pages to repopulate
	 *    the hash tables
	 * Ordering matters: the traverse flags and starting handles
	 * must be reset before mptsas_update_hashtab() runs, since
	 * that walk resumes from them.
	 */
	mptsas_update_phymask(mpt);
	/*
	 * Invalidate the existing entries
	 */
	mptsas_invalid_hashtab(&mpt->m_active->m_tgttbl);
	mptsas_invalid_hashtab(&mpt->m_active->m_smptbl);
	mpt->m_done_traverse_dev = 0;
	mpt->m_done_traverse_smp = 0;
	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
	mptsas_update_hashtab(mpt);
}
14134
/*
 * Configure every expander and target visible through iport 'pdip'.
 * A phymask of zero identifies the virtual iport used for RAID
 * volumes, which are enumerated separately.
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t	*smpdip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* Finish any incomplete device/expander page walk first. */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/*
	 * Online every expander whose phymask matches this iport.
	 * NOTE(review): m_mutex is dropped around mptsas_online_smp()
	 * while the hash-traversal cursor stays live -- confirm the
	 * table cannot be modified concurrently at this point.
	 */
	psmp = (mptsas_smp_t *)mptsas_hash_traverse(&mpt->m_active->m_smptbl,
	    MPTSAS_HASH_FIRST);
	while (psmp != NULL) {
		phy_mask = psmp->m_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
		psmp = (mptsas_smp_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_smptbl, MPTSAS_HASH_NEXT);
	}

	/* Likewise configure the LUNs of every matching target. */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		phy_mask = ptgt->m_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
	mutex_exit(&mpt->m_mutex);
}
14194
14195 static int
14196 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
14197 {
14198 int rval = DDI_FAILURE;
14199 dev_info_t *tdip;
14200
14201 rval = mptsas_config_luns(pdip, ptgt);
14202 if (rval != DDI_SUCCESS) {
14203 /*
14204 * The return value means the SCMD_REPORT_LUNS
14205 * did not execute successfully. The target maybe
14206 * doesn't support such command.
14207 */
14208 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
14209 }
14210 return (rval);
14211 }
14212
14213 /*
14214 * Return fail if not all the childs/paths are freed.
14215 * if there is any path under the HBA, the return value will be always fail
14216 * because we didn't call mdi_pi_free for path
14217 */
14218 static int
14219 mptsas_offline_target(dev_info_t *pdip, char *name)
14220 {
14221 dev_info_t *child = NULL, *prechild = NULL;
14222 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
14223 int tmp_rval, rval = DDI_SUCCESS;
14224 char *addr, *cp;
14225 size_t s;
14226 mptsas_t *mpt = DIP2MPT(pdip);
14227
14228 child = ddi_get_child(pdip);
14229 while (child) {
14230 addr = ddi_get_name_addr(child);
14231 prechild = child;
14232 child = ddi_get_next_sibling(child);
14233
14234 if (addr == NULL) {
14235 continue;
14236 }
14237 if ((cp = strchr(addr, ',')) == NULL) {
14238 continue;
14239 }
14240
14241 s = (uintptr_t)cp - (uintptr_t)addr;
14242
14243 if (strncmp(addr, name, s) != 0) {
14244 continue;
14245 }
14246
14247 tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
14248 NDI_DEVI_REMOVE);
14249 if (tmp_rval != DDI_SUCCESS) {
14250 rval = DDI_FAILURE;
14251 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14252 prechild, MPTSAS_DEV_GONE) !=
14253 DDI_PROP_SUCCESS) {
14254 mptsas_log(mpt, CE_WARN, "mptsas driver "
14255 "unable to create property for "
14256 "SAS %s (MPTSAS_DEV_GONE)", addr);
14257 }
14258 }
14259 }
14260
14261 pip = mdi_get_next_client_path(pdip, NULL);
14262 while (pip) {
14263 addr = MDI_PI(pip)->pi_addr;
14264 savepip = pip;
14265 pip = mdi_get_next_client_path(pdip, pip);
14266 if (addr == NULL) {
14267 continue;
14268 }
14269
14270 if ((cp = strchr(addr, ',')) == NULL) {
14271 continue;
14272 }
14273
14274 s = (uintptr_t)cp - (uintptr_t)addr;
14275
14276 if (strncmp(addr, name, s) != 0) {
14277 continue;
14278 }
14279
14280 (void) mptsas_offline_lun(pdip, NULL, savepip,
14281 NDI_DEVI_REMOVE);
14282 /*
14283 * driver will not invoke mdi_pi_free, so path will not
14284 * be freed forever, return DDI_FAILURE.
14285 */
14286 rval = DDI_FAILURE;
14287 }
14288 return (rval);
14289 }
14290
14291 static int
14292 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
14293 mdi_pathinfo_t *rpip, uint_t flags)
14294 {
14295 int rval = DDI_FAILURE;
14296 char *devname;
14297 dev_info_t *cdip, *parent;
14298
14299 if (rpip != NULL) {
14300 parent = scsi_vhci_dip;
14301 cdip = mdi_pi_get_client(rpip);
14302 } else if (rdip != NULL) {
14303 parent = pdip;
14304 cdip = rdip;
14305 } else {
14306 return (DDI_FAILURE);
14307 }
14308
14309 /*
14310 * Make sure node is attached otherwise
14311 * it won't have related cache nodes to
14312 * clean up. i_ddi_devi_attached is
14313 * similiar to i_ddi_node_state(cdip) >=
14314 * DS_ATTACHED.
14315 */
14316 if (i_ddi_devi_attached(cdip)) {
14317
14318 /* Get full devname */
14319 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14320 (void) ddi_deviname(cdip, devname);
14321 /* Clean cache */
14322 (void) devfs_clean(parent, devname + 1,
14323 DV_CLEAN_FORCE);
14324 kmem_free(devname, MAXNAMELEN + 1);
14325 }
14326 if (rpip != NULL) {
14327 if (MDI_PI_IS_OFFLINE(rpip)) {
14328 rval = DDI_SUCCESS;
14329 } else {
14330 rval = mdi_pi_offline(rpip, 0);
14331 }
14332 } else {
14333 rval = ndi_devi_offline(cdip, flags);
14334 }
14335
14336 return (rval);
14337 }
14338
14339 static dev_info_t *
14340 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
14341 {
14342 dev_info_t *child = NULL;
14343 char *smp_wwn = NULL;
14344
14345 child = ddi_get_child(parent);
14346 while (child) {
14347 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
14348 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
14349 != DDI_SUCCESS) {
14350 child = ddi_get_next_sibling(child);
14351 continue;
14352 }
14353
14354 if (strcmp(smp_wwn, str_wwn) == 0) {
14355 ddi_prop_free(smp_wwn);
14356 break;
14357 }
14358 child = ddi_get_next_sibling(child);
14359 ddi_prop_free(smp_wwn);
14360 }
14361 return (child);
14362 }
14363
14364 static int
14365 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
14366 {
14367 int rval = DDI_FAILURE;
14368 char *devname;
14369 char wwn_str[MPTSAS_WWN_STRLEN];
14370 dev_info_t *cdip;
14371
14372 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
14373
14374 cdip = mptsas_find_smp_child(pdip, wwn_str);
14375
14376 if (cdip == NULL)
14377 return (DDI_SUCCESS);
14378
14379 /*
14380 * Make sure node is attached otherwise
14381 * it won't have related cache nodes to
14382 * clean up. i_ddi_devi_attached is
14383 * similiar to i_ddi_node_state(cdip) >=
14384 * DS_ATTACHED.
14385 */
14386 if (i_ddi_devi_attached(cdip)) {
14387
14388 /* Get full devname */
14389 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14390 (void) ddi_deviname(cdip, devname);
14391 /* Clean cache */
14392 (void) devfs_clean(pdip, devname + 1,
14393 DV_CLEAN_FORCE);
14394 kmem_free(devname, MAXNAMELEN + 1);
14395 }
14396
14397 rval = ndi_devi_offline(cdip, flags);
14398
14399 return (rval);
14400 }
14401
14402 static dev_info_t *
14403 mptsas_find_child(dev_info_t *pdip, char *name)
14404 {
14405 dev_info_t *child = NULL;
14406 char *rname = NULL;
14407 int rval = DDI_FAILURE;
14408
14409 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14410
14411 child = ddi_get_child(pdip);
14412 while (child) {
14413 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
14414 if (rval != DDI_SUCCESS) {
14415 child = ddi_get_next_sibling(child);
14416 bzero(rname, SCSI_MAXNAMELEN);
14417 continue;
14418 }
14419
14420 if (strcmp(rname, name) == 0) {
14421 break;
14422 }
14423 child = ddi_get_next_sibling(child);
14424 bzero(rname, SCSI_MAXNAMELEN);
14425 }
14426
14427 kmem_free(rname, SCSI_MAXNAMELEN);
14428
14429 return (child);
14430 }
14431
14432
14433 static dev_info_t *
14434 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
14435 {
14436 dev_info_t *child = NULL;
14437 char *name = NULL;
14438 char *addr = NULL;
14439
14440 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14441 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14442 (void) sprintf(name, "%016"PRIx64, sasaddr);
14443 (void) sprintf(addr, "w%s,%x", name, lun);
14444 child = mptsas_find_child(pdip, addr);
14445 kmem_free(name, SCSI_MAXNAMELEN);
14446 kmem_free(addr, SCSI_MAXNAMELEN);
14447 return (child);
14448 }
14449
14450 static dev_info_t *
14451 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
14452 {
14453 dev_info_t *child;
14454 char *addr;
14455
14456 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14457 (void) sprintf(addr, "p%x,0", phy);
14458 child = mptsas_find_child(pdip, addr);
14459 kmem_free(addr, SCSI_MAXNAMELEN);
14460 return (child);
14461 }
14462
14463 static mdi_pathinfo_t *
14464 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
14465 {
14466 mdi_pathinfo_t *path;
14467 char *addr = NULL;
14468
14469 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14470 (void) sprintf(addr, "p%x,0", phy);
14471 path = mdi_pi_find(pdip, NULL, addr);
14472 kmem_free(addr, SCSI_MAXNAMELEN);
14473 return (path);
14474 }
14475
14476 static mdi_pathinfo_t *
14477 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
14478 {
14479 mdi_pathinfo_t *path;
14480 char *name = NULL;
14481 char *addr = NULL;
14482
14483 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14484 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14485 (void) sprintf(name, "%016"PRIx64, sasaddr);
14486 (void) sprintf(addr, "w%s,%x", name, lun);
14487 path = mdi_pi_find(parent, NULL, addr);
14488 kmem_free(name, SCSI_MAXNAMELEN);
14489 kmem_free(addr, SCSI_MAXNAMELEN);
14490
14491 return (path);
14492 }
14493
/*
 * Enumerate a single LUN under 'pdip'.  Fetches inquiry VPD page 0x83
 * to derive a devid/GUID; when a valid GUID is obtained and MPxIO is
 * enabled, the LUN is created as a scsi_vhci virtual path, otherwise
 * as a physical child node.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int		i = 0;
	uchar_t		*inq83 = NULL;
	int		inq83_len1 = 0xFF;
	int		inq83_len = 0;
	int		rval = DDI_FAILURE;
	ddi_devid_t	devid;
	char		*guid = NULL;
	int		target = ptgt->m_devhdl;
	mdi_pathinfo_t	*pip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable escape hatch: optionally fall back to
			 * a physical node when page 0x83 cannot be read.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			/* Wait one second between retries. */
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	/* rval is re-used below as the node-creation status. */
	rval = DDI_FAILURE;

create_lun:
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	/* Fall back to a physical node if virtual creation failed. */
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
14617
14618 static int
14619 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
14620 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
14621 {
14622 int target;
14623 char *nodename = NULL;
14624 char **compatible = NULL;
14625 int ncompatible = 0;
14626 int mdi_rtn = MDI_FAILURE;
14627 int rval = DDI_FAILURE;
14628 char *old_guid = NULL;
14629 mptsas_t *mpt = DIP2MPT(pdip);
14630 char *lun_addr = NULL;
14631 char *wwn_str = NULL;
14632 char *attached_wwn_str = NULL;
14633 char *component = NULL;
14634 uint8_t phy = 0xFF;
14635 uint64_t sas_wwn;
14636 int64_t lun64 = 0;
14637 uint32_t devinfo;
14638 uint16_t dev_hdl;
14639 uint16_t pdev_hdl;
14640 uint64_t dev_sas_wwn;
14641 uint64_t pdev_sas_wwn;
14642 uint32_t pdev_info;
14643 uint8_t physport;
14644 uint8_t phy_id;
14645 uint32_t page_address;
14646 uint16_t bay_num, enclosure;
14647 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14648 uint32_t dev_info;
14649
14650 mutex_enter(&mpt->m_mutex);
14651 target = ptgt->m_devhdl;
14652 sas_wwn = ptgt->m_sas_wwn;
14653 devinfo = ptgt->m_deviceinfo;
14654 phy = ptgt->m_phynum;
14655 mutex_exit(&mpt->m_mutex);
14656
14657 if (sas_wwn) {
14658 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
14659 } else {
14660 *pip = mptsas_find_path_phy(pdip, phy);
14661 }
14662
14663 if (*pip != NULL) {
14664 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14665 ASSERT(*lun_dip != NULL);
14666 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
14667 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
14668 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
14669 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
14670 /*
14671 * Same path back online again.
14672 */
14673 (void) ddi_prop_free(old_guid);
14674 if ((!MDI_PI_IS_ONLINE(*pip)) &&
14675 (!MDI_PI_IS_STANDBY(*pip)) &&
14676 (ptgt->m_tgt_unconfigured == 0)) {
14677 rval = mdi_pi_online(*pip, 0);
14678 mutex_enter(&mpt->m_mutex);
14679 ptgt->m_led_status = 0;
14680 (void) mptsas_flush_led_status(mpt,
14681 ptgt);
14682 mutex_exit(&mpt->m_mutex);
14683 } else {
14684 rval = DDI_SUCCESS;
14685 }
14686 if (rval != DDI_SUCCESS) {
14687 mptsas_log(mpt, CE_WARN, "path:target: "
14688 "%x, lun:%x online failed!", target,
14689 lun);
14690 *pip = NULL;
14691 *lun_dip = NULL;
14692 }
14693 return (rval);
14694 } else {
14695 /*
14696 * The GUID of the LUN has changed which maybe
14697 * because customer mapped another volume to the
14698 * same LUN.
14699 */
14700 mptsas_log(mpt, CE_WARN, "The GUID of the "
14701 "target:%x, lun:%x was changed, maybe "
14702 "because someone mapped another volume "
14703 "to the same LUN", target, lun);
14704 (void) ddi_prop_free(old_guid);
14705 if (!MDI_PI_IS_OFFLINE(*pip)) {
14706 rval = mdi_pi_offline(*pip, 0);
14707 if (rval != MDI_SUCCESS) {
14708 mptsas_log(mpt, CE_WARN, "path:"
14709 "target:%x, lun:%x offline "
14710 "failed!", target, lun);
14711 *pip = NULL;
14712 *lun_dip = NULL;
14713 return (DDI_FAILURE);
14714 }
14715 }
14716 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
14717 mptsas_log(mpt, CE_WARN, "path:target:"
14718 "%x, lun:%x free failed!", target,
14719 lun);
14720 *pip = NULL;
14721 *lun_dip = NULL;
14722 return (DDI_FAILURE);
14723 }
14724 }
14725 } else {
14726 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
14727 "property for path:target:%x, lun:%x", target, lun);
14728 *pip = NULL;
14729 *lun_dip = NULL;
14730 return (DDI_FAILURE);
14731 }
14732 }
14733 scsi_hba_nodename_compatible_get(inq, NULL,
14734 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
14735
14736 /*
14737 * if nodename can't be determined then print a message and skip it
14738 */
14739 if (nodename == NULL) {
14740 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
14741 "driver for target%d lun %d dtype:0x%02x", target, lun,
14742 inq->inq_dtype);
14743 return (DDI_FAILURE);
14744 }
14745
14746 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14747 /* The property is needed by MPAPI */
14748 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14749
14750 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14751 if (guid) {
14752 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
14753 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14754 } else {
14755 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
14756 (void) sprintf(wwn_str, "p%x", phy);
14757 }
14758
14759 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
14760 guid, lun_addr, compatible, ncompatible,
14761 0, pip);
14762 if (mdi_rtn == MDI_SUCCESS) {
14763
14764 if (mdi_prop_update_string(*pip, MDI_GUID,
14765 guid) != DDI_SUCCESS) {
14766 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14767 "create prop for target %d lun %d (MDI_GUID)",
14768 target, lun);
14769 mdi_rtn = MDI_FAILURE;
14770 goto virt_create_done;
14771 }
14772
14773 if (mdi_prop_update_int(*pip, LUN_PROP,
14774 lun) != DDI_SUCCESS) {
14775 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14776 "create prop for target %d lun %d (LUN_PROP)",
14777 target, lun);
14778 mdi_rtn = MDI_FAILURE;
14779 goto virt_create_done;
14780 }
14781 lun64 = (int64_t)lun;
14782 if (mdi_prop_update_int64(*pip, LUN64_PROP,
14783 lun64) != DDI_SUCCESS) {
14784 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14785 "create prop for target %d (LUN64_PROP)",
14786 target);
14787 mdi_rtn = MDI_FAILURE;
14788 goto virt_create_done;
14789 }
14790 if (mdi_prop_update_string_array(*pip, "compatible",
14791 compatible, ncompatible) !=
14792 DDI_PROP_SUCCESS) {
14793 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14794 "create prop for target %d lun %d (COMPATIBLE)",
14795 target, lun);
14796 mdi_rtn = MDI_FAILURE;
14797 goto virt_create_done;
14798 }
14799 if (sas_wwn && (mdi_prop_update_string(*pip,
14800 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
14801 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14802 "create prop for target %d lun %d "
14803 "(target-port)", target, lun);
14804 mdi_rtn = MDI_FAILURE;
14805 goto virt_create_done;
14806 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
14807 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
14808 /*
14809 * Direct attached SATA device without DeviceName
14810 */
14811 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14812 "create prop for SAS target %d lun %d "
14813 "(sata-phy)", target, lun);
14814 mdi_rtn = MDI_FAILURE;
14815 goto virt_create_done;
14816 }
14817 mutex_enter(&mpt->m_mutex);
14818
14819 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14820 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14821 (uint32_t)ptgt->m_devhdl;
14822 rval = mptsas_get_sas_device_page0(mpt, page_address,
14823 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
14824 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14825 if (rval != DDI_SUCCESS) {
14826 mutex_exit(&mpt->m_mutex);
14827 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14828 "parent device for handle %d", page_address);
14829 mdi_rtn = MDI_FAILURE;
14830 goto virt_create_done;
14831 }
14832
14833 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14834 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14835 rval = mptsas_get_sas_device_page0(mpt, page_address,
14836 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
14837 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14838 if (rval != DDI_SUCCESS) {
14839 mutex_exit(&mpt->m_mutex);
14840 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14841 "device info for handle %d", page_address);
14842 mdi_rtn = MDI_FAILURE;
14843 goto virt_create_done;
14844 }
14845
14846 mutex_exit(&mpt->m_mutex);
14847
14848 /*
14849 * If this device direct attached to the controller
14850 * set the attached-port to the base wwid
14851 */
14852 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14853 != DEVINFO_DIRECT_ATTACHED) {
14854 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14855 pdev_sas_wwn);
14856 } else {
14857 /*
14858 * Update the iport's attached-port to guid
14859 */
14860 if (sas_wwn == 0) {
14861 (void) sprintf(wwn_str, "p%x", phy);
14862 } else {
14863 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14864 }
14865 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14866 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14867 DDI_PROP_SUCCESS) {
14868 mptsas_log(mpt, CE_WARN,
14869 "mptsas unable to create "
14870 "property for iport target-port"
14871 " %s (sas_wwn)",
14872 wwn_str);
14873 mdi_rtn = MDI_FAILURE;
14874 goto virt_create_done;
14875 }
14876
14877 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14878 mpt->un.m_base_wwid);
14879 }
14880
14881 if (mdi_prop_update_string(*pip,
14882 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14883 DDI_PROP_SUCCESS) {
14884 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14885 "property for iport attached-port %s (sas_wwn)",
14886 attached_wwn_str);
14887 mdi_rtn = MDI_FAILURE;
14888 goto virt_create_done;
14889 }
14890
14891
14892 if (inq->inq_dtype == 0) {
14893 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14894 /*
14895 * set obp path for pathinfo
14896 */
14897 (void) snprintf(component, MAXPATHLEN,
14898 "disk@%s", lun_addr);
14899
14900 if (mdi_pi_pathname_obp_set(*pip, component) !=
14901 DDI_SUCCESS) {
14902 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14903 "unable to set obp-path for object %s",
14904 component);
14905 mdi_rtn = MDI_FAILURE;
14906 goto virt_create_done;
14907 }
14908 }
14909
14910 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14911 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14912 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14913 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
14914 "pm-capable", 1)) !=
14915 DDI_PROP_SUCCESS) {
14916 mptsas_log(mpt, CE_WARN, "mptsas driver"
14917 "failed to create pm-capable "
14918 "property, target %d", target);
14919 mdi_rtn = MDI_FAILURE;
14920 goto virt_create_done;
14921 }
14922 }
14923 /*
14924 * Create the phy-num property
14925 */
14926 if (mdi_prop_update_int(*pip, "phy-num",
14927 ptgt->m_phynum) != DDI_SUCCESS) {
14928 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14929 "create phy-num property for target %d lun %d",
14930 target, lun);
14931 mdi_rtn = MDI_FAILURE;
14932 goto virt_create_done;
14933 }
14934 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14935 mdi_rtn = mdi_pi_online(*pip, 0);
14936 if (mdi_rtn == MDI_SUCCESS) {
14937 mutex_enter(&mpt->m_mutex);
14938 ptgt->m_led_status = 0;
14939 if (mptsas_flush_led_status(mpt, ptgt) != DDI_SUCCESS) {
14940 NDBG14(("mptsas: clear LED for slot %x "
14941 "failed", ptgt->m_slot_num));
14942 }
14943 mutex_exit(&mpt->m_mutex);
14944 }
14945 if (mdi_rtn == MDI_NOT_SUPPORTED) {
14946 mdi_rtn = MDI_FAILURE;
14947 }
14948 virt_create_done:
14949 if (*pip && mdi_rtn != MDI_SUCCESS) {
14950 (void) mdi_pi_free(*pip, 0);
14951 *pip = NULL;
14952 *lun_dip = NULL;
14953 }
14954 }
14955
14956 scsi_hba_nodename_compatible_free(nodename, compatible);
14957 if (lun_addr != NULL) {
14958 kmem_free(lun_addr, SCSI_MAXNAMELEN);
14959 }
14960 if (wwn_str != NULL) {
14961 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14962 }
14963 if (component != NULL) {
14964 kmem_free(component, MAXPATHLEN);
14965 }
14966
14967 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14968 }
14969
14970 static int
14971 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
14972 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
14973 {
14974 int target;
14975 int rval;
14976 int ndi_rtn = NDI_FAILURE;
14977 uint64_t be_sas_wwn;
14978 char *nodename = NULL;
14979 char **compatible = NULL;
14980 int ncompatible = 0;
14981 int instance = 0;
14982 mptsas_t *mpt = DIP2MPT(pdip);
14983 char *wwn_str = NULL;
14984 char *component = NULL;
14985 char *attached_wwn_str = NULL;
14986 uint8_t phy = 0xFF;
14987 uint64_t sas_wwn;
14988 uint32_t devinfo;
14989 uint16_t dev_hdl;
14990 uint16_t pdev_hdl;
14991 uint64_t pdev_sas_wwn;
14992 uint64_t dev_sas_wwn;
14993 uint32_t pdev_info;
14994 uint8_t physport;
14995 uint8_t phy_id;
14996 uint32_t page_address;
14997 uint16_t bay_num, enclosure;
14998 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14999 uint32_t dev_info;
15000 int64_t lun64 = 0;
15001
15002 mutex_enter(&mpt->m_mutex);
15003 target = ptgt->m_devhdl;
15004 sas_wwn = ptgt->m_sas_wwn;
15005 devinfo = ptgt->m_deviceinfo;
15006 phy = ptgt->m_phynum;
15007 mutex_exit(&mpt->m_mutex);
15008
15009 /*
15010 * generate compatible property with binding-set "mpt"
15011 */
15012 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
15013 &nodename, &compatible, &ncompatible);
15014
15015 /*
15016 * if nodename can't be determined then print a message and skip it
15017 */
15018 if (nodename == NULL) {
15019 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
15020 "for target %d lun %d", target, lun);
15021 return (DDI_FAILURE);
15022 }
15023
15024 ndi_rtn = ndi_devi_alloc(pdip, nodename,
15025 DEVI_SID_NODEID, lun_dip);
15026
15027 /*
15028 * if lun alloc success, set props
15029 */
15030 if (ndi_rtn == NDI_SUCCESS) {
15031
15032 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15033 *lun_dip, LUN_PROP, lun) !=
15034 DDI_PROP_SUCCESS) {
15035 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15036 "property for target %d lun %d (LUN_PROP)",
15037 target, lun);
15038 ndi_rtn = NDI_FAILURE;
15039 goto phys_create_done;
15040 }
15041
15042 lun64 = (int64_t)lun;
15043 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
15044 *lun_dip, LUN64_PROP, lun64) !=
15045 DDI_PROP_SUCCESS) {
15046 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15047 "property for target %d lun64 %d (LUN64_PROP)",
15048 target, lun);
15049 ndi_rtn = NDI_FAILURE;
15050 goto phys_create_done;
15051 }
15052 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
15053 *lun_dip, "compatible", compatible, ncompatible)
15054 != DDI_PROP_SUCCESS) {
15055 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15056 "property for target %d lun %d (COMPATIBLE)",
15057 target, lun);
15058 ndi_rtn = NDI_FAILURE;
15059 goto phys_create_done;
15060 }
15061
15062 /*
15063 * We need the SAS WWN for non-multipath devices, so
15064 * we'll use the same property as that multipathing
15065 * devices need to present for MPAPI. If we don't have
15066 * a WWN (e.g. parallel SCSI), don't create the prop.
15067 */
15068 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15069 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15070 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
15071 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
15072 != DDI_PROP_SUCCESS) {
15073 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15074 "create property for SAS target %d lun %d "
15075 "(target-port)", target, lun);
15076 ndi_rtn = NDI_FAILURE;
15077 goto phys_create_done;
15078 }
15079
15080 be_sas_wwn = BE_64(sas_wwn);
15081 if (sas_wwn && ndi_prop_update_byte_array(
15082 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
15083 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
15084 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15085 "create property for SAS target %d lun %d "
15086 "(port-wwn)", target, lun);
15087 ndi_rtn = NDI_FAILURE;
15088 goto phys_create_done;
15089 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
15090 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
15091 DDI_PROP_SUCCESS)) {
15092 /*
15093 * Direct attached SATA device without DeviceName
15094 */
15095 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15096 "create property for SAS target %d lun %d "
15097 "(sata-phy)", target, lun);
15098 ndi_rtn = NDI_FAILURE;
15099 goto phys_create_done;
15100 }
15101
15102 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15103 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
15104 mptsas_log(mpt, CE_WARN, "mptsas unable to"
15105 "create property for SAS target %d lun %d"
15106 " (SAS_PROP)", target, lun);
15107 ndi_rtn = NDI_FAILURE;
15108 goto phys_create_done;
15109 }
15110 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
15111 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
15112 mptsas_log(mpt, CE_WARN, "mptsas unable "
15113 "to create guid property for target %d "
15114 "lun %d", target, lun);
15115 ndi_rtn = NDI_FAILURE;
15116 goto phys_create_done;
15117 }
15118
15119 /*
15120 * The following code is to set properties for SM-HBA support,
15121 * it doesn't apply to RAID volumes
15122 */
15123 if (ptgt->m_phymask == 0)
15124 goto phys_raid_lun;
15125
15126 mutex_enter(&mpt->m_mutex);
15127
15128 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15129 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15130 (uint32_t)ptgt->m_devhdl;
15131 rval = mptsas_get_sas_device_page0(mpt, page_address,
15132 &dev_hdl, &dev_sas_wwn, &dev_info,
15133 &physport, &phy_id, &pdev_hdl,
15134 &bay_num, &enclosure);
15135 if (rval != DDI_SUCCESS) {
15136 mutex_exit(&mpt->m_mutex);
15137 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15138 "parent device for handle %d.", page_address);
15139 ndi_rtn = NDI_FAILURE;
15140 goto phys_create_done;
15141 }
15142
15143 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15144 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15145 rval = mptsas_get_sas_device_page0(mpt, page_address,
15146 &dev_hdl, &pdev_sas_wwn, &pdev_info,
15147 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
15148 if (rval != DDI_SUCCESS) {
15149 mutex_exit(&mpt->m_mutex);
15150 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15151 "device for handle %d.", page_address);
15152 ndi_rtn = NDI_FAILURE;
15153 goto phys_create_done;
15154 }
15155
15156 mutex_exit(&mpt->m_mutex);
15157
15158 /*
15159 * If this device direct attached to the controller
15160 * set the attached-port to the base wwid
15161 */
15162 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15163 != DEVINFO_DIRECT_ATTACHED) {
15164 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15165 pdev_sas_wwn);
15166 } else {
15167 /*
15168 * Update the iport's attached-port to guid
15169 */
15170 if (sas_wwn == 0) {
15171 (void) sprintf(wwn_str, "p%x", phy);
15172 } else {
15173 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15174 }
15175 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15176 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15177 DDI_PROP_SUCCESS) {
15178 mptsas_log(mpt, CE_WARN,
15179 "mptsas unable to create "
15180 "property for iport target-port"
15181 " %s (sas_wwn)",
15182 wwn_str);
15183 ndi_rtn = NDI_FAILURE;
15184 goto phys_create_done;
15185 }
15186
15187 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15188 mpt->un.m_base_wwid);
15189 }
15190
15191 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15192 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15193 DDI_PROP_SUCCESS) {
15194 mptsas_log(mpt, CE_WARN,
15195 "mptsas unable to create "
15196 "property for iport attached-port %s (sas_wwn)",
15197 attached_wwn_str);
15198 ndi_rtn = NDI_FAILURE;
15199 goto phys_create_done;
15200 }
15201
15202 if (IS_SATA_DEVICE(dev_info)) {
15203 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15204 *lun_dip, MPTSAS_VARIANT, "sata") !=
15205 DDI_PROP_SUCCESS) {
15206 mptsas_log(mpt, CE_WARN,
15207 "mptsas unable to create "
15208 "property for device variant ");
15209 ndi_rtn = NDI_FAILURE;
15210 goto phys_create_done;
15211 }
15212 }
15213
15214 if (IS_ATAPI_DEVICE(dev_info)) {
15215 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15216 *lun_dip, MPTSAS_VARIANT, "atapi") !=
15217 DDI_PROP_SUCCESS) {
15218 mptsas_log(mpt, CE_WARN,
15219 "mptsas unable to create "
15220 "property for device variant ");
15221 ndi_rtn = NDI_FAILURE;
15222 goto phys_create_done;
15223 }
15224 }
15225
15226 phys_raid_lun:
15227 /*
15228 * if this is a SAS controller, and the target is a SATA
15229 * drive, set the 'pm-capable' property for sd and if on
15230 * an OPL platform, also check if this is an ATAPI
15231 * device.
15232 */
15233 instance = ddi_get_instance(mpt->m_dip);
15234 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15235 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15236 NDBG2(("mptsas%d: creating pm-capable property, "
15237 "target %d", instance, target));
15238
15239 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
15240 *lun_dip, "pm-capable", 1)) !=
15241 DDI_PROP_SUCCESS) {
15242 mptsas_log(mpt, CE_WARN, "mptsas "
15243 "failed to create pm-capable "
15244 "property, target %d", target);
15245 ndi_rtn = NDI_FAILURE;
15246 goto phys_create_done;
15247 }
15248
15249 }
15250
15251 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
15252 /*
15253 * add 'obp-path' properties for devinfo
15254 */
15255 bzero(wwn_str, sizeof (wwn_str));
15256 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15257 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15258 if (guid) {
15259 (void) snprintf(component, MAXPATHLEN,
15260 "disk@w%s,%x", wwn_str, lun);
15261 } else {
15262 (void) snprintf(component, MAXPATHLEN,
15263 "disk@p%x,%x", phy, lun);
15264 }
15265 if (ddi_pathname_obp_set(*lun_dip, component)
15266 != DDI_SUCCESS) {
15267 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15268 "unable to set obp-path for SAS "
15269 "object %s", component);
15270 ndi_rtn = NDI_FAILURE;
15271 goto phys_create_done;
15272 }
15273 }
15274 /*
15275 * Create the phy-num property for non-raid disk
15276 */
15277 if (ptgt->m_phymask != 0) {
15278 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15279 *lun_dip, "phy-num", ptgt->m_phynum) !=
15280 DDI_PROP_SUCCESS) {
15281 mptsas_log(mpt, CE_WARN, "mptsas driver "
15282 "failed to create phy-num property for "
15283 "target %d", target);
15284 ndi_rtn = NDI_FAILURE;
15285 goto phys_create_done;
15286 }
15287 }
15288 phys_create_done:
15289 /*
15290 * If props were setup ok, online the lun
15291 */
15292 if (ndi_rtn == NDI_SUCCESS) {
15293 /*
15294 * Try to online the new node
15295 */
15296 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
15297 }
15298 if (ndi_rtn == NDI_SUCCESS) {
15299 mutex_enter(&mpt->m_mutex);
15300 ptgt->m_led_status = 0;
15301 if (mptsas_flush_led_status(mpt, ptgt) != DDI_SUCCESS) {
15302 NDBG14(("mptsas: clear LED for tgt %x "
15303 "failed", ptgt->m_slot_num));
15304 }
15305 mutex_exit(&mpt->m_mutex);
15306 }
15307
15308 /*
15309 * If success set rtn flag, else unwire alloc'd lun
15310 */
15311 if (ndi_rtn != NDI_SUCCESS) {
15312 NDBG12(("mptsas driver unable to online "
15313 "target %d lun %d", target, lun));
15314 ndi_prop_remove_all(*lun_dip);
15315 (void) ndi_devi_free(*lun_dip);
15316 *lun_dip = NULL;
15317 }
15318 }
15319
15320 scsi_hba_nodename_compatible_free(nodename, compatible);
15321
15322 if (wwn_str != NULL) {
15323 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15324 }
15325 if (component != NULL) {
15326 kmem_free(component, MAXPATHLEN);
15327 }
15328
15329
15330 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15331 }
15332
15333 static int
15334 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
15335 {
15336 mptsas_t *mpt = DIP2MPT(pdip);
15337 struct smp_device smp_sd;
15338
15339 /* XXX An HBA driver should not be allocating an smp_device. */
15340 bzero(&smp_sd, sizeof (struct smp_device));
15341 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
15342 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
15343
15344 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
15345 return (NDI_FAILURE);
15346 return (NDI_SUCCESS);
15347 }
15348
15349 static int
15350 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
15351 {
15352 mptsas_t *mpt = DIP2MPT(pdip);
15353 mptsas_smp_t *psmp = NULL;
15354 int rval;
15355 int phymask;
15356
15357 /*
15358 * Get the physical port associated to the iport
15359 * PHYMASK TODO
15360 */
15361 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
15362 "phymask", 0);
15363 /*
15364 * Find the smp node in hash table with specified sas address and
15365 * physical port
15366 */
15367 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
15368 if (psmp == NULL) {
15369 return (DDI_FAILURE);
15370 }
15371
15372 rval = mptsas_online_smp(pdip, psmp, smp_dip);
15373
15374 return (rval);
15375 }
15376
/*
 * Allocate, decorate and online a devinfo node ("smp") for an SMP
 * target (expander) under the iport pdip.  Sets the SMP WWN,
 * target-port and attached-port properties, and for a directly
 * attached expander on an "old" iport (no num-phys property yet)
 * re-derives and re-publishes the iport's SM-HBA phy properties.
 * Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
    dev_info_t **smp_dip)
{
	char		wwn_str[MPTSAS_WWN_STRLEN];
	char		attached_wwn_str[MPTSAS_WWN_STRLEN];
	int		ndi_rtn = NDI_FAILURE;
	int		rval = 0;
	mptsas_smp_t	dev_info;
	uint32_t	page_address;
	mptsas_t	*mpt = DIP2MPT(pdip);
	uint16_t	dev_hdl;
	uint64_t	sas_wwn;
	uint64_t	smp_sas_wwn;
	uint8_t		physport;
	uint8_t		phy_id;
	uint16_t	pdev_hdl;
	uint8_t		numphys = 0;
	uint16_t	i = 0;
	char		phymask[MPTSAS_MAX_PHYS];
	char		*iport = NULL;
	mptsas_phymask_t	phy_mask = 0;
	uint16_t	attached_devhdl;
	uint16_t	bay_num, enclosure;

	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);

	/*
	 * Probe the SMP device first so that the node of a device that
	 * has been removed cannot be configured successfully.
	 */
	if (mptsas_probe_smp(pdip, smp_node->m_sasaddr) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Already configured? Return the existing child. */
	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
		return (DDI_SUCCESS);
	}

	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {
		/*
		 * Set the flavor of the child to be SMP flavored
		 */
		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);

		/* smp-wwn: bare hex SAS address (no "w" prefix). */
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SMP_WWN, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp device %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		/* target-port uses the "w"-prefixed unit-address form. */
		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_sasaddr);
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for iport target-port %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);

		/* Expander page0 gives us the parent device handle. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &dev_info);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to get expander "
			    "parent device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Parent device page0: SAS address of what we attach to. */
		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_pdevhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Device page0 for the expander itself (device info bits). */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		/*
		 * If this smp direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    sas_wwn);
		} else {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SMP %s (SMP_PROP) ",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/*
		 * check the smp to see whether it direct
		 * attached to the controller
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			goto smp_create_done;
		}
		/* If num-phys already exists, iport props are current. */
		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
		if (numphys > 0) {
			goto smp_create_done;
		}
		/*
		 * this iport is an old iport, we need to
		 * reconfig the props for it.
		 */
		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_VIRTUAL_PORT, 0) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			goto smp_create_done;
		}

		/*
		 * Recover the phymask for this iport by matching the
		 * iport's unit-address string against each phy's mask,
		 * then count the phys in the mask.
		 */
		mutex_enter(&mpt->m_mutex);
		numphys = 0;
		iport = ddi_get_name_addr(pdip);
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			bzero(phymask, sizeof (phymask));
			(void) sprintf(phymask,
			    "%x", mpt->m_phy_info[i].phy_mask);
			if (strcmp(phymask, iport) == 0) {
				phy_mask = mpt->m_phy_info[i].phy_mask;
				break;
			}
		}

		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			if ((phy_mask >> i) & 0x01) {
				numphys++;
			}
		}
		/*
		 * Update PHY info for smhba
		 */
		if (mptsas_smhba_phy_init(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas phy update "
			    "failed");
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		mptsas_smhba_set_phy_props(mpt, iport, pdip,
		    numphys, &attached_devhdl);

		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_NUM_PHYS, numphys) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_NUM_PHYS);
			mptsas_log(mpt, CE_WARN, "mptsas update "
			    "num phys props failed");
			goto smp_create_done;
		}
		/*
		 * Add parent's props for SMHBA support
		 */
		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    SCSI_ADDR_PROP_ATTACHED_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas update iport"
			    "attached-port failed");
			goto smp_create_done;
		}

smp_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		/*
		 * NOTE(review): unlike the lun-creation paths, *smp_dip
		 * is not set to NULL after the free here — confirm
		 * callers do not use it on failure.
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas unable to online "
			    "SMP target %s", wwn_str));
			ndi_prop_remove_all(*smp_dip);
			(void) ndi_devi_free(*smp_dip);
		}
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
15629
/*
 * smp transport routine: entry point for the SMP framework.  Wraps the
 * caller's SMP request in an MPI2 SMP Passthrough message, issues it
 * via mptsas_do_passthru(), and maps IOC/SAS status codes onto errnos
 * in smp_pkt_reason.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t			wwn;
	Mpi2SmpPassthroughRequest_t	req;
	Mpi2SmpPassthroughReply_t	rep;
	uint32_t			direction = 0;
	mptsas_t			*mpt;
	int				ret;
	uint64_t			tmp64;

	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	/* Destination SAS address comes from the packet's address. */
	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Request size must fit the 16-bit RequestDataLength field. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * NOTE(review): req/rsp sizes are adjusted by -4 below —
	 * presumably to exclude the SMP CRC bytes; confirm against the
	 * MPI2 specification.
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	/* SASAddress field is little-endian in the MPI message. */
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* Map MPI2 IOC status codes onto errno values. */
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
15714
15715 /*
15716 * If we didn't get a match, we need to get sas page0 for each device, and
15717 * untill we get a match. If failed, return NULL
15718 */
15719 static mptsas_target_t *
15720 mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, uint8_t phy)
15721 {
15722 int i, j = 0;
15723 int rval = 0;
15724 uint16_t cur_handle;
15725 uint32_t page_address;
15726 mptsas_target_t *ptgt = NULL;
15727
15728 /*
15729 * PHY named device must be direct attached and attaches to
15730 * narrow port, if the iport is not parent of the device which
15731 * we are looking for.
15732 */
15733 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
15734 if ((1 << i) & phymask)
15735 j++;
15736 }
15737
15738 if (j > 1)
15739 return (NULL);
15740
15741 /*
15742 * Must be a narrow port and single device attached to the narrow port
15743 * So the physical port num of device which is equal to the iport's
15744 * port num is the device what we are looking for.
15745 */
15746
15747 if (mpt->m_phy_info[phy].phy_mask != phymask)
15748 return (NULL);
15749
15750 mutex_enter(&mpt->m_mutex);
15751
15752 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
15753 MPTSAS_HASH_FIRST);
15754 while (ptgt != NULL) {
15755 if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
15756 mutex_exit(&mpt->m_mutex);
15757 return (ptgt);
15758 }
15759
15760 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
15761 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
15762 }
15763
15764 if (mpt->m_done_traverse_dev) {
15765 mutex_exit(&mpt->m_mutex);
15766 return (NULL);
15767 }
15768
15769 /* If didn't get a match, come here */
15770 cur_handle = mpt->m_dev_handle;
15771 for (; ; ) {
15772 ptgt = NULL;
15773 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
15774 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
15775 rval = mptsas_get_target_device_info(mpt, page_address,
15776 &cur_handle, &ptgt);
15777 if ((rval == DEV_INFO_FAIL_PAGE0) ||
15778 (rval == DEV_INFO_FAIL_ALLOC)) {
15779 break;
15780 }
15781 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
15782 (rval == DEV_INFO_PHYS_DISK)) {
15783 continue;
15784 }
15785 mpt->m_dev_handle = cur_handle;
15786
15787 if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
15788 break;
15789 }
15790 }
15791
15792 mutex_exit(&mpt->m_mutex);
15793 return (ptgt);
15794 }
15795
15796 /*
15797 * The ptgt->m_sas_wwn contains the wwid for each disk.
15798 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
15799 * If we didn't get a match, we need to get sas page0 for each device, and
15800 * untill we get a match
15801 * If failed, return NULL
15802 */
/*
 * Translate a (phymask, wwid) pair to its mptsas_target_t.
 * A phymask of 0 denotes an IR (RAID) volume; in that case the RAID
 * configuration is refreshed and the target table searched again.
 * Otherwise, if the target is not yet hashed and device discovery has
 * not finished, SAS device page0 is walked handle-by-handle until the
 * wwid matches.  Returns NULL when no match is found.
 * Acquires and releases mpt->m_mutex internally; callers must not
 * hold it.
 */
static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *tmp_tgt = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Fast path: the target is already in the hash table. */
	tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
	    &mpt->m_active->m_tgttbl, wwid, phymask);
	if (tmp_tgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	if (phymask == 0) {
		/*
		 * It's IR volume
		 */
		rval = mptsas_get_raid_info(mpt);
		if (rval) {
			/* RAID info refreshed; the volume may be hashed now. */
			tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
			    &mpt->m_active->m_tgttbl, wwid, phymask);
		}
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	/* All devices already discovered and hashed: no match exists. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		tmp_tgt = NULL;
		/* Request device page0 for the handle following cur_handle. */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &tmp_tgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			/* Page read or allocation failed: give up. */
			tmp_tgt = NULL;
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			/* Not a target we expose; move to the next handle. */
			continue;
		}
		/* Remember how far discovery has progressed. */
		mpt->m_dev_handle = cur_handle;
		if ((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) &&
		    (tmp_tgt->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (tmp_tgt);
}
15864
/*
 * Translate a (phymask, wwid) pair to its mptsas_smp_t (SMP/expander)
 * structure.  If the expander is not yet hashed and expander discovery
 * has not finished, expander page0 is walked handle-by-handle, hashing
 * each node found, until the SAS address matches.  Returns NULL when no
 * match is found.  Acquires and releases mpt->m_mutex internally.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_smp_t smp_node, *psmp = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Fast path: the expander is already in the hash table. */
	psmp = (struct mptsas_smp *)mptsas_hash_search(&mpt->m_active->m_smptbl,
	    wwid, phymask);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* All expanders already discovered and hashed: no match exists. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_smp_devhdl;
	for (; ; ) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			/* No further expanders (or the page read failed). */
			break;
		}
		/* Record progress and hash the newly discovered expander. */
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
		/* mptsas_smp_alloc uses KM_SLEEP, so it cannot return NULL. */
		ASSERT(psmp);
		if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) &&
		    (psmp->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
15909
15910 /* helper functions using hash */
15911
15912 /*
15913 * Can't have duplicate entries for same devhdl,
15914 * if there are invalid entries, the devhdl should be set to 0xffff
15915 */
15916 static void *
15917 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
15918 {
15919 mptsas_hash_data_t *data;
15920
15921 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
15922 while (data != NULL) {
15923 if (data->devhdl == devhdl) {
15924 break;
15925 }
15926 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
15927 }
15928 return (data);
15929 }
15930
15931 mptsas_target_t *
15932 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
15933 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum, mptsas_t *mpt)
15934 {
15935 mptsas_target_t *tmp_tgt = NULL;
15936
15937 tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
15938 if (tmp_tgt != NULL) {
15939 NDBG20(("Hash item already exist"));
15940 tmp_tgt->m_deviceinfo = devinfo;
15941 tmp_tgt->m_devhdl = devhdl;
15942 return (tmp_tgt);
15943 }
15944 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
15945 if (tmp_tgt == NULL) {
15946 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
15947 return (NULL);
15948 }
15949 tmp_tgt->m_devhdl = devhdl;
15950 tmp_tgt->m_sas_wwn = wwid;
15951 tmp_tgt->m_deviceinfo = devinfo;
15952 tmp_tgt->m_phymask = phymask;
15953 tmp_tgt->m_phynum = phynum;
15954 /* Initialized the tgt structure */
15955 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15956 tmp_tgt->m_qfull_retry_interval =
15957 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15958 tmp_tgt->m_t_throttle = MAX_THROTTLE;
15959 mutex_init(&tmp_tgt->m_tgt_intr_mutex, NULL, MUTEX_DRIVER,
15960 DDI_INTR_PRI(mpt->m_intr_pri));
15961
15962 mptsas_hash_add(hashtab, tmp_tgt);
15963
15964 return (tmp_tgt);
15965 }
15966
15967 static void
15968 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15969 mptsas_phymask_t phymask)
15970 {
15971 mptsas_target_t *tmp_tgt;
15972 tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
15973 if (tmp_tgt == NULL) {
15974 cmn_err(CE_WARN, "Tgt not found, nothing to free");
15975 } else {
15976 mutex_destroy(&tmp_tgt->m_tgt_intr_mutex);
15977 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
15978 }
15979 }
15980
15981 /*
15982 * Return the entry in the hash table
15983 */
15984 static mptsas_smp_t *
15985 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
15986 {
15987 uint64_t key1 = data->m_sasaddr;
15988 mptsas_phymask_t key2 = data->m_phymask;
15989 mptsas_smp_t *ret_data;
15990
15991 ret_data = mptsas_hash_search(hashtab, key1, key2);
15992 if (ret_data != NULL) {
15993 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15994 return (ret_data);
15995 }
15996
15997 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
15998 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15999 mptsas_hash_add(hashtab, ret_data);
16000 return (ret_data);
16001 }
16002
16003 static void
16004 mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
16005 mptsas_phymask_t phymask)
16006 {
16007 mptsas_smp_t *tmp_smp;
16008 tmp_smp = mptsas_hash_rem(hashtab, wwid, phymask);
16009 if (tmp_smp == NULL) {
16010 cmn_err(CE_WARN, "Smp element not found, nothing to free");
16011 } else {
16012 kmem_free(tmp_smp, sizeof (struct mptsas_smp));
16013 }
16014 }
16015
16016 /*
16017 * Hash operation functions
16018 * key1 is the sas_wwn, key2 is the phymask
16019 */
/*
 * Reset a hash table to the empty state: clear the bucket array and
 * the traversal cursor.
 */
static void
mptsas_hash_init(mptsas_hash_table_t *hashtab)
{
	if (hashtab == NULL) {
		return;
	}
	/*
	 * NOTE(review): this zeroes MPTSAS_HASH_ARRAY_SIZE *
	 * sizeof (mptsas_hash_node_t) bytes, yet head[] appears to hold
	 * node *pointers* (see mptsas_hash_add/search) — confirm
	 * against the declaration of head[] that this size does not
	 * exceed the array.
	 */
	bzero(hashtab->head, sizeof (mptsas_hash_node_t) *
	    MPTSAS_HASH_ARRAY_SIZE);
	hashtab->cur = NULL;
	hashtab->line = 0;
}
16031
16032 static void
16033 mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen)
16034 {
16035 uint16_t line = 0;
16036 mptsas_hash_node_t *cur = NULL, *last = NULL;
16037
16038 if (hashtab == NULL) {
16039 return;
16040 }
16041 for (line = 0; line < MPTSAS_HASH_ARRAY_SIZE; line++) {
16042 cur = hashtab->head[line];
16043 while (cur != NULL) {
16044 last = cur;
16045 cur = cur->next;
16046 kmem_free(last->data, datalen);
16047 kmem_free(last, sizeof (mptsas_hash_node_t));
16048 }
16049 }
16050 }
16051
16052 /*
16053 * You must guarantee the element doesn't exist in the hash table
16054 * before you call mptsas_hash_add()
16055 */
16056 static void
16057 mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data)
16058 {
16059 uint64_t key1 = ((mptsas_hash_data_t *)data)->key1;
16060 mptsas_phymask_t key2 = ((mptsas_hash_data_t *)data)->key2;
16061 mptsas_hash_node_t **head = NULL;
16062 mptsas_hash_node_t *node = NULL;
16063
16064 if (hashtab == NULL) {
16065 return;
16066 }
16067 ASSERT(mptsas_hash_search(hashtab, key1, key2) == NULL);
16068 node = kmem_zalloc(sizeof (mptsas_hash_node_t), KM_NOSLEEP);
16069 node->data = data;
16070
16071 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
16072 if (*head == NULL) {
16073 *head = node;
16074 } else {
16075 node->next = *head;
16076 *head = node;
16077 }
16078 }
16079
16080 static void *
16081 mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
16082 mptsas_phymask_t key2)
16083 {
16084 mptsas_hash_node_t **head = NULL;
16085 mptsas_hash_node_t *last = NULL, *cur = NULL;
16086 mptsas_hash_data_t *data;
16087 if (hashtab == NULL) {
16088 return (NULL);
16089 }
16090 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
16091 cur = *head;
16092 while (cur != NULL) {
16093 data = cur->data;
16094 if ((data->key1 == key1) && (data->key2 == key2)) {
16095 if (last == NULL) {
16096 (*head) = cur->next;
16097 } else {
16098 last->next = cur->next;
16099 }
16100 kmem_free(cur, sizeof (mptsas_hash_node_t));
16101 return (data);
16102 } else {
16103 last = cur;
16104 cur = cur->next;
16105 }
16106 }
16107 return (NULL);
16108 }
16109
16110 static void *
16111 mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
16112 mptsas_phymask_t key2)
16113 {
16114 mptsas_hash_node_t *cur = NULL;
16115 mptsas_hash_data_t *data;
16116 if (hashtab == NULL) {
16117 return (NULL);
16118 }
16119 cur = hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE];
16120 while (cur != NULL) {
16121 data = cur->data;
16122 if ((data->key1 == key1) && (data->key2 == key2)) {
16123 return (data);
16124 } else {
16125 cur = cur->next;
16126 }
16127 }
16128 return (NULL);
16129 }
16130
16131 static void *
16132 mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos)
16133 {
16134 mptsas_hash_node_t *this = NULL;
16135
16136 if (hashtab == NULL) {
16137 return (NULL);
16138 }
16139
16140 if (pos == MPTSAS_HASH_FIRST) {
16141 hashtab->line = 0;
16142 hashtab->cur = NULL;
16143 this = hashtab->head[0];
16144 } else {
16145 if (hashtab->cur == NULL) {
16146 return (NULL);
16147 } else {
16148 this = hashtab->cur->next;
16149 }
16150 }
16151
16152 while (this == NULL) {
16153 hashtab->line++;
16154 if (hashtab->line >= MPTSAS_HASH_ARRAY_SIZE) {
16155 /* the traverse reaches the end */
16156 hashtab->cur = NULL;
16157 return (NULL);
16158 } else {
16159 this = hashtab->head[hashtab->line];
16160 }
16161 }
16162 hashtab->cur = this;
16163 return (this->data);
16164 }
16165
16166 /*
16167 * Functions for SGPIO LED support
16168 */
16169 static dev_info_t *
16170 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
16171 {
16172 dev_info_t *dip;
16173 int prop;
16174 dip = e_ddi_hold_devi_by_dev(dev, 0);
16175 if (dip == NULL)
16176 return (dip);
16177 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
16178 "phymask", 0);
16179 *phymask = (mptsas_phymask_t)prop;
16180 ddi_release_devi(dip);
16181 return (dip);
16182 }
16183 static mptsas_target_t *
16184 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16185 {
16186 uint8_t phynum;
16187 uint64_t wwn;
16188 int lun;
16189 mptsas_target_t *ptgt = NULL;
16190
16191 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16192 return (NULL);
16193 }
16194 if (addr[0] == 'w') {
16195 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16196 } else {
16197 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16198 }
16199 return (ptgt);
16200 }
16201
16202 static int
16203 mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt)
16204 {
16205 uint32_t slotstatus = 0;
16206
16207 /* Build an MPI2 Slot Status based on our view of the world */
16208 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16209 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16210 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16211 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16212 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16213 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
16214
16215 /* Write it to the controller */
16216 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16217 slotstatus, ptgt->m_slot_num));
16218 return (mptsas_send_sep(mpt, ptgt, &slotstatus,
16219 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16220 }
16221
16222 /*
16223 * send sep request, use enclosure/slot addressing
16224 */
static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
    uint32_t *status, uint8_t act)
{
	Mpi2SepRequest_t req;
	Mpi2SepReply_t rep;
	int ret;

	/* Caller must already hold the instance mutex. */
	ASSERT(mutex_owned(&mpt->m_mutex));

	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));

	/* Do nothing for RAID volumes */
	if (ptgt->m_phymask == 0) {
		NDBG14(("mptsas_send_sep: Skip RAID volumes"));
		return (DDI_FAILURE);
	}

	/* Build the SEP request using enclosure/slot addressing. */
	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	req.Action = act;
	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	req.EnclosureHandle = LE_16(ptgt->m_enclosure);
	req.Slot = LE_16(ptgt->m_slot_num);
	/* Only writes carry a slot status in the request. */
	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		req.SlotStatus = LE_32(*status);
	}
	/* Issue the request synchronously with a 60-second timeout. */
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
	if (ret != 0) {
		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
		    "Processor Request message error %d", ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the ioc status */
	/*
	 * NOTE(review): this compares the raw IOCStatus (flag bits
	 * included) against SUCCESS, while the INVALID_FIELD test below
	 * masks with MPI2_IOCSTATUS_MASK first — confirm whether a
	 * success status with the log-info-available flag set should
	 * really be treated as a failure here.
	 */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		if ((LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_INVALID_FIELD) {
			mptsas_log(mpt, CE_NOTE, "send sep act %x: Not "
			    "supported action, loginfo %x", act,
			    LE_32(rep.IOCLogInfo));
			return (DDI_FAILURE);
		}
		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
		    "status:%x", act, LE_16(rep.IOCStatus));
		return (DDI_FAILURE);
	}
	/* For reads, hand the current slot status back to the caller. */
	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		*status = LE_32(rep.SlotStatus);
	}

	return (DDI_SUCCESS);
}
16277
16278 int
16279 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
16280 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
16281 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
16282 {
16283 ddi_dma_cookie_t new_cookie;
16284 size_t alloc_len;
16285 uint_t ncookie;
16286
16287 if (cookiep == NULL)
16288 cookiep = &new_cookie;
16289
16290 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
16291 NULL, dma_hdp) != DDI_SUCCESS) {
16292 dma_hdp = NULL;
16293 return (FALSE);
16294 }
16295
16296 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
16297 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
16298 acc_hdp) != DDI_SUCCESS) {
16299 ddi_dma_free_handle(dma_hdp);
16300 dma_hdp = NULL;
16301 return (FALSE);
16302 }
16303
16304 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
16305 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
16306 cookiep, &ncookie) != DDI_DMA_MAPPED) {
16307 (void) ddi_dma_mem_free(acc_hdp);
16308 ddi_dma_free_handle(dma_hdp);
16309 dma_hdp = NULL;
16310 return (FALSE);
16311 }
16312
16313 return (TRUE);
16314 }
16315
16316 void
16317 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
16318 {
16319 if (*dma_hdp == NULL)
16320 return;
16321
16322 (void) ddi_dma_unbind_handle(*dma_hdp);
16323 (void) ddi_dma_mem_free(acc_hdp);
16324 ddi_dma_free_handle(dma_hdp);
16325 dma_hdp = NULL;
16326 }
16327
/*
 * Count the commands currently outstanding on the controller by
 * summing, over every slot free-queue pair, the slots that have been
 * taken from the alloc queue but not yet returned through the release
 * queue (initial count minus what remains on both queues).
 */
static int
mptsas_outstanding_cmds_n(mptsas_t *mpt)
{
	int n = 0, i;
	for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) {
		/*
		 * Hold both queue locks of the pair (alloc first, then
		 * release) so the two counts are sampled consistently.
		 */
		mutex_enter(&mpt->m_slot_freeq_pairp[i].
		    m_slot_allocq.s.m_fq_mutex);
		mutex_enter(&mpt->m_slot_freeq_pairp[i].
		    m_slot_releq.s.m_fq_mutex);
		n += (mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n_init -
		    mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n -
		    mpt->m_slot_freeq_pairp[i].m_slot_releq.s.m_fq_n);
		mutex_exit(&mpt->m_slot_freeq_pairp[i].
		    m_slot_releq.s.m_fq_mutex);
		mutex_exit(&mpt->m_slot_freeq_pairp[i].
		    m_slot_allocq.s.m_fq_mutex);
	}
	/* More outstanding than slots exist means the queues are corrupt. */
	if (mpt->m_max_requests - 2 < n)
		panic("mptsas: free slot allocq and releq crazy");
	return (n);
}