1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
25 */
26
27 /*
28 * Copyright (c) 2000 to 2010, LSI Corporation.
29 * All rights reserved.
30 *
31 * Redistribution and use in source and binary forms of all code within
32 * this file that is exclusively owned by LSI, with or without
33 * modification, is permitted provided that, in addition to the CDDL 1.0
34 * License requirements, the following conditions are met:
35 *
36 * Neither the name of the author nor the names of its contributors may be
37 * used to endorse or promote products derived from this software without
38 * specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
43 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
44 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
45 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
46 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
47 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
48 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
49 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
50 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
51 * DAMAGE.
52 */
53
54 /*
55 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
56 *
57 */
58
59 #if defined(lint) || defined(DEBUG)
60 #define MPTSAS_DEBUG
61 #endif
62
63 /*
64 * standard header files.
65 */
66 #include <sys/note.h>
67 #include <sys/scsi/scsi.h>
68 #include <sys/pci.h>
69 #include <sys/file.h>
70 #include <sys/policy.h>
71 #include <sys/sysevent.h>
72 #include <sys/sysevent/eventdefs.h>
73 #include <sys/sysevent/dr.h>
74 #include <sys/sata/sata_defs.h>
75 #include <sys/scsi/generic/sas.h>
76 #include <sys/scsi/impl/scsi_sas.h>
77
78 #pragma pack(1)
79 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
80 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
81 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
87 #pragma pack()
88
89 /*
90 * private header files.
91 *
92 */
93 #include <sys/scsi/impl/scsi_reset_notify.h>
94 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
95 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
96 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
97 #include <sys/raidioctl.h>
98
99 #include <sys/fs/dv_node.h> /* devfs_clean */
100
101 /*
102 * FMA header files
103 */
104 #include <sys/ddifm.h>
105 #include <sys/fm/protocol.h>
106 #include <sys/fm/util.h>
107 #include <sys/fm/io/ddi.h>
108
109 /*
110 * autoconfiguration data and routines.
111 */
112 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
113 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
114 static int mptsas_power(dev_info_t *dip, int component, int level);
115
116 /*
117 * cb_ops function
118 */
119 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
120 cred_t *credp, int *rval);
121 #ifdef __sparc
122 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
123 #else /* __sparc */
124 static int mptsas_quiesce(dev_info_t *devi);
125 #endif /* __sparc */
126
127 /*
128 * Resource initilaization for hardware
129 */
130 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
131 static void mptsas_disable_bus_master(mptsas_t *mpt);
132 static void mptsas_hba_fini(mptsas_t *mpt);
133 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
134 static int mptsas_hba_setup(mptsas_t *mpt);
135 static void mptsas_hba_teardown(mptsas_t *mpt);
136 static int mptsas_config_space_init(mptsas_t *mpt);
137 static void mptsas_config_space_fini(mptsas_t *mpt);
138 static void mptsas_iport_register(mptsas_t *mpt);
139 static int mptsas_smp_setup(mptsas_t *mpt);
140 static void mptsas_smp_teardown(mptsas_t *mpt);
141 static int mptsas_cache_create(mptsas_t *mpt);
142 static void mptsas_cache_destroy(mptsas_t *mpt);
143 static int mptsas_alloc_request_frames(mptsas_t *mpt);
144 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
145 static int mptsas_alloc_free_queue(mptsas_t *mpt);
146 static int mptsas_alloc_post_queue(mptsas_t *mpt);
147 static void mptsas_alloc_reply_args(mptsas_t *mpt);
148 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
149 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
150 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
151
152 /*
153 * SCSA function prototypes
154 */
155 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
156 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
157 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
158 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
159 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
160 int tgtonly);
161 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
162 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
163 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
164 int tgtlen, int flags, int (*callback)(), caddr_t arg);
165 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
166 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
167 struct scsi_pkt *pkt);
168 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
169 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
170 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
171 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
172 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
173 void (*callback)(caddr_t), caddr_t arg);
174 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
175 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
176 static int mptsas_scsi_quiesce(dev_info_t *dip);
177 static int mptsas_scsi_unquiesce(dev_info_t *dip);
178 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
179 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
180
181 /*
182 * SMP functions
183 */
184 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
185
186 /*
187 * internal function prototypes.
188 */
189 static void mptsas_list_add(mptsas_t *mpt);
190 static void mptsas_list_del(mptsas_t *mpt);
191
192 static int mptsas_quiesce_bus(mptsas_t *mpt);
193 static int mptsas_unquiesce_bus(mptsas_t *mpt);
194
195 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
196 static void mptsas_free_handshake_msg(mptsas_t *mpt);
197
198 static void mptsas_ncmds_checkdrain(void *arg);
199
200 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
201 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
202 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
203 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
204
205 static int mptsas_do_detach(dev_info_t *dev);
206 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
207 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
208 struct scsi_pkt *pkt);
209 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
210
211 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
212 static void mptsas_handle_event(void *args);
213 static int mptsas_handle_event_sync(void *args);
214 static void mptsas_handle_dr(void *args);
215 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
216 dev_info_t *pdip);
217
218 static void mptsas_restart_cmd(void *);
219
220 static void mptsas_flush_hba(mptsas_t *mpt);
221 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
222 uint8_t tasktype);
223 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
224 uchar_t reason, uint_t stat);
225
226 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
227 static void mptsas_process_intr(mptsas_t *mpt,
228 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
229 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
230 pMpi2ReplyDescriptorsUnion_t reply_desc);
231 static void mptsas_handle_address_reply(mptsas_t *mpt,
232 pMpi2ReplyDescriptorsUnion_t reply_desc);
233 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
234 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
235 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
236
237 static void mptsas_watch(void *arg);
238 static void mptsas_watchsubr(mptsas_t *mpt);
239 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
240 static void mptsas_kill_target(mptsas_t *mpt, mptsas_target_t *ptgt);
241
242 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
243 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
244 uint8_t *data, uint32_t request_size, uint32_t reply_size,
245 uint32_t data_size, uint32_t direction, uint8_t *dataout,
246 uint32_t dataout_size, short timeout, int mode);
247 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
248
249 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
250 uint32_t unique_id);
251 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
252 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
253 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
254 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
255 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
256 uint32_t diag_type);
257 static int mptsas_diag_register(mptsas_t *mpt,
258 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
259 static int mptsas_diag_unregister(mptsas_t *mpt,
260 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
261 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
262 uint32_t *return_code);
263 static int mptsas_diag_read_buffer(mptsas_t *mpt,
264 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
265 uint32_t *return_code, int ioctl_mode);
266 static int mptsas_diag_release(mptsas_t *mpt,
267 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
268 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
269 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
270 int ioctl_mode);
271 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
272 int mode);
273
274 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
275 int cmdlen, int tgtlen, int statuslen, int kf);
276 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
277
278 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
279 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
280
281 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
282 int kmflags);
283 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
284
285 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
286 mptsas_cmd_t *cmd);
287 static void mptsas_check_task_mgt(mptsas_t *mpt,
288 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
289 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
290 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
291 int *resid);
292
293 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
294 static void mptsas_free_active_slots(mptsas_t *mpt);
295 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
296
297 static void mptsas_restart_hba(mptsas_t *mpt);
298 static void mptsas_restart_waitq(mptsas_t *mpt);
299
300 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
301 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
302 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
303
304 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
305 static void mptsas_doneq_empty(mptsas_t *mpt);
306 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
307
308 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
309 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
310 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
311 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
312
313
314 static void mptsas_start_watch_reset_delay();
315 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
316 static void mptsas_watch_reset_delay(void *arg);
317 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
318
319 /*
320 * helper functions
321 */
322 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
323
324 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
325 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
326 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
327 int lun);
328 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
329 int lun);
330 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
331 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
332
333 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
334 int *lun);
335 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
336
337 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask,
338 uint8_t phy);
339 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask,
340 uint64_t wwid);
341 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask,
342 uint64_t wwid);
343
344 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
345 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
346
347 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
348 uint16_t *handle, mptsas_target_t **pptgt);
349 static void mptsas_update_phymask(mptsas_t *mpt);
350
351 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
352 mptsas_phymask_t *phymask);
353
354
355 /*
356 * Enumeration / DR functions
357 */
358 static void mptsas_config_all(dev_info_t *pdip);
359 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
360 dev_info_t **lundip);
361 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
362 dev_info_t **lundip);
363
364 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
365 static int mptsas_offline_target(dev_info_t *pdip, char *name);
366
367 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
368 dev_info_t **dip);
369
370 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
371 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
372 dev_info_t **dip, mptsas_target_t *ptgt);
373
374 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
375 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
376
377 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
378 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
379 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
380 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
381 int lun);
382
383 static void mptsas_offline_missed_luns(dev_info_t *pdip,
384 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
385 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
386 mdi_pathinfo_t *rpip, uint_t flags);
387
388 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
389 dev_info_t **smp_dip);
390 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
391 uint_t flags);
392
393 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
394 int mode, int *rval);
395 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
396 int mode, int *rval);
397 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
398 int mode, int *rval);
399 static void mptsas_record_event(void *args);
400 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
401 int mode);
402
403 static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
404 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
405 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
406 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
407 mptsas_phymask_t key2);
408 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
409 mptsas_phymask_t key2);
410 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
411
412 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
413 uint32_t, mptsas_phymask_t, uint8_t);
414 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
415 mptsas_smp_t *data);
416 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
417 mptsas_phymask_t phymask);
418 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t);
419 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
420 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
421 dev_info_t **smp_dip);
422
423 /*
424 * Power management functions
425 */
426 static int mptsas_get_pci_cap(mptsas_t *mpt);
427 static int mptsas_init_pm(mptsas_t *mpt);
428
429 /*
430 * MPT MSI tunable:
431 *
432 * By default MSI is enabled on all supported platforms.
433 */
434 boolean_t mptsas_enable_msi = B_TRUE;
435 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
436
437 static int mptsas_register_intrs(mptsas_t *);
438 static void mptsas_unregister_intrs(mptsas_t *);
439 static int mptsas_add_intrs(mptsas_t *, int);
440 static void mptsas_rem_intrs(mptsas_t *);
441
442 /*
443 * FMA Prototypes
444 */
445 static void mptsas_fm_init(mptsas_t *mpt);
446 static void mptsas_fm_fini(mptsas_t *mpt);
447 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
448
449 extern pri_t minclsyspri, maxclsyspri;
450
451 /*
452 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
453 * under this device that the paths to a physical device are created when
454 * MPxIO is used.
455 */
456 extern dev_info_t *scsi_vhci_dip;
457
458 /*
459 * Tunable timeout value for Inquiry VPD page 0x83
460 * By default the value is 30 seconds.
461 */
462 int mptsas_inq83_retry_timeout = 30;
463 /*
464 * Maximum number of command timeouts (0 - 255) considered acceptable.
465 */
466 int mptsas_timeout_threshold = 2;
467 /*
468 * Timeouts exceeding threshold within this period are considered excessive.
469 */
470 int mptsas_timeout_interval = 30;
471
472 /*
473 * This is used to allocate memory for message frame storage, not for
474 * data I/O DMA. All message frames must be stored in the first 4G of
475 * physical memory.
476 */
ddi_dma_attr_t mptsas_dma_attrs = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,		/* address low - should be 0 (longlong) */
	0xffffffffull,	/* address high - 32-bit max range */
	0x00ffffffull,	/* count max - max DMA object size */
	4,		/* allocation alignment requirements */
	0x78,		/* burstsizes - binary encoded values */
	1,		/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,		/* granularity - device transfer size */
	0	/* flags, set to 0 */
};
491
492 /*
493 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
494 * physical addresses are supported.)
495 */
ddi_dma_attr_t mptsas_dma_attrs64 = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,		/* address low - should be 0 (longlong) */
	0xffffffffffffffffull,	/* address high - 64-bit max */
	0x00ffffffull,	/* count max - max DMA object size */
	4,		/* allocation alignment requirements */
	0x78,		/* burstsizes - binary encoded values */
	1,		/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,		/* granularity - device transfer size */
	DDI_DMA_RELAXED_ORDERING	/* flags, enable relaxed ordering */
};
510
/*
 * Device register access attributes: the chip's registers are
 * little-endian and are accessed in strict program order.
 */
ddi_device_acc_attr_t mptsas_dev_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
517
/*
 * Character device entry points.  Only open/close (delegated to the
 * SCSA framework) and ioctl are implemented; everything else is nodev.
 */
static struct cb_ops mptsas_cb_ops = {
	scsi_hba_open,	/* open */
	scsi_hba_close,	/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	mptsas_ioctl,	/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	nochpoll,	/* chpoll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,		/* streamtab */
	D_MP,		/* cb_flag */
	CB_REV,		/* rev */
	nodev,		/* aread */
	nodev		/* awrite */
};
538
/*
 * Driver operations vector.  reset(9E) is only implemented on SPARC;
 * on other platforms quiesce(9E) serves the fast-reboot path instead.
 */
static struct dev_ops mptsas_ops = {
	DEVO_REV,	/* devo_rev, */
	0,		/* refcnt */
	ddi_no_info,	/* info */
	nulldev,	/* identify */
	nulldev,	/* probe */
	mptsas_attach,	/* attach */
	mptsas_detach,	/* detach */
#ifdef	__sparc
	mptsas_reset,
#else
	nodev,		/* reset */
#endif  /* __sparc */
	&mptsas_cb_ops,	/* driver operations */
	NULL,		/* bus operations */
	mptsas_power,	/* power management */
#ifdef	__sparc
	ddi_quiesce_not_needed
#else
	mptsas_quiesce	/* quiesce */
#endif	/* __sparc */
};
561
562
563 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
564
/* Loadable-module descriptor: identifies this module as a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. This one is a driver */
	MPTSAS_MOD_STRING, /* Name of the module. */
	&mptsas_ops,	/* driver ops */
};
570
/* Module linkage handed to mod_install(9F)/mod_remove(9F) in _init/_fini. */
static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
574 #define TARGET_PROP "target"
575 #define LUN_PROP "lun"
576 #define LUN64_PROP "lun64"
577 #define SAS_PROP "sas-mpt"
578 #define MDI_GUID "wwn"
579 #define NDI_GUID "guid"
580 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
581
582 /*
583 * Local static data
584 */
585 #if defined(MPTSAS_DEBUG)
586 uint32_t mptsas_debug_flags = 0;
587 #endif /* defined(MPTSAS_DEBUG) */
588 uint32_t mptsas_debug_resets = 0;
589
590 static kmutex_t mptsas_global_mutex;
591 static void *mptsas_state; /* soft state ptr */
592 static krwlock_t mptsas_global_rwlock;
593
594 static kmutex_t mptsas_log_mutex;
595 static char mptsas_log_buf[256];
596 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
597
598 static mptsas_t *mptsas_head, *mptsas_tail;
599 static clock_t mptsas_scsi_watchdog_tick;
600 static clock_t mptsas_tick;
601 static timeout_id_t mptsas_reset_watch;
602 static timeout_id_t mptsas_timeout_id;
603 static int mptsas_timeouts_enabled = 0;
604 /*
605 * warlock directives
606 */
607 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
608 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
609 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
610 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
611 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
612 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
613
614 /*
615 * SM - HBA statics
616 */
617 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
618
619 #ifdef MPTSAS_DEBUG
620 void debug_enter(char *);
621 #endif
622
623 /*
624 * Notes:
625 * - scsi_hba_init(9F) initializes SCSI HBA modules
626 * - must call scsi_hba_fini(9F) if modload() fails
627 */
628 int
629 _init(void)
630 {
631 int status;
632 /* CONSTCOND */
633 ASSERT(NO_COMPETING_THREADS);
634
635 NDBG0(("_init"));
636
637 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
638 MPTSAS_INITIAL_SOFT_SPACE);
639 if (status != 0) {
640 return (status);
641 }
642
643 if ((status = scsi_hba_init(&modlinkage)) != 0) {
644 ddi_soft_state_fini(&mptsas_state);
645 return (status);
646 }
647
648 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
649 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
650 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
651
652 if ((status = mod_install(&modlinkage)) != 0) {
653 mutex_destroy(&mptsas_log_mutex);
654 rw_destroy(&mptsas_global_rwlock);
655 mutex_destroy(&mptsas_global_mutex);
656 ddi_soft_state_fini(&mptsas_state);
657 scsi_hba_fini(&modlinkage);
658 }
659
660 return (status);
661 }
662
663 /*
664 * Notes:
665 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
666 */
667 int
668 _fini(void)
669 {
670 int status;
671 /* CONSTCOND */
672 ASSERT(NO_COMPETING_THREADS);
673
674 NDBG0(("_fini"));
675
676 if ((status = mod_remove(&modlinkage)) == 0) {
677 ddi_soft_state_fini(&mptsas_state);
678 scsi_hba_fini(&modlinkage);
679 mutex_destroy(&mptsas_global_mutex);
680 rw_destroy(&mptsas_global_rwlock);
681 mutex_destroy(&mptsas_log_mutex);
682 }
683 return (status);
684 }
685
686 /*
687 * The loadable-module _info(9E) entry point
688 */
int
_info(struct modinfo *modinfop)
{
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);
	NDBG0(("mptsas _info"));

	/* Delegate to the framework to fill in module information. */
	return (mod_info(&modlinkage, modinfop));
}
698
699
700 static int
701 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
702 {
703 dev_info_t *pdip;
704 mptsas_t *mpt;
705 scsi_hba_tran_t *hba_tran;
706 char *iport = NULL;
707 char phymask[MPTSAS_MAX_PHYS];
708 mptsas_phymask_t phy_mask = 0;
709 int dynamic_port = 0;
710 uint32_t page_address;
711 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
712 int rval = DDI_FAILURE;
713 int i = 0;
714 uint8_t numphys = 0;
715 uint8_t phy_id;
716 uint8_t phy_port = 0;
717 uint16_t attached_devhdl = 0;
718 uint32_t dev_info;
719 uint64_t attached_sas_wwn;
720 uint16_t dev_hdl;
721 uint16_t pdev_hdl;
722 uint16_t bay_num, enclosure;
723 char attached_wwnstr[MPTSAS_WWN_STRLEN];
724
725 /* CONSTCOND */
726 ASSERT(NO_COMPETING_THREADS);
727
728 switch (cmd) {
729 case DDI_ATTACH:
730 break;
731
732 case DDI_RESUME:
733 /*
734 * If this a scsi-iport node, nothing to do here.
735 */
736 return (DDI_SUCCESS);
737
738 default:
739 return (DDI_FAILURE);
740 }
741
742 pdip = ddi_get_parent(dip);
743
744 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
745 NULL) {
746 cmn_err(CE_WARN, "Failed attach iport because fail to "
747 "get tran vector for the HBA node");
748 return (DDI_FAILURE);
749 }
750
751 mpt = TRAN2MPT(hba_tran);
752 ASSERT(mpt != NULL);
753 if (mpt == NULL)
754 return (DDI_FAILURE);
755
756 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
757 NULL) {
758 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
759 "get tran vector for the iport node");
760 return (DDI_FAILURE);
761 }
762
763 /*
764 * Overwrite parent's tran_hba_private to iport's tran vector
765 */
766 hba_tran->tran_hba_private = mpt;
767
768 ddi_report_dev(dip);
769
770 /*
771 * Get SAS address for initiator port according dev_handle
772 */
773 iport = ddi_get_name_addr(dip);
774 if (iport && strncmp(iport, "v0", 2) == 0) {
775 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
776 MPTSAS_VIRTUAL_PORT, 1) !=
777 DDI_PROP_SUCCESS) {
778 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
779 MPTSAS_VIRTUAL_PORT);
780 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
781 "prop update failed");
782 return (DDI_FAILURE);
783 }
784 return (DDI_SUCCESS);
785 }
786
787 mutex_enter(&mpt->m_mutex);
788 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
789 bzero(phymask, sizeof (phymask));
790 (void) sprintf(phymask,
791 "%x", mpt->m_phy_info[i].phy_mask);
792 if (strcmp(phymask, iport) == 0) {
793 break;
794 }
795 }
796
797 if (i == MPTSAS_MAX_PHYS) {
798 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
799 "seems not exist", iport);
800 mutex_exit(&mpt->m_mutex);
801 return (DDI_FAILURE);
802 }
803
804 phy_mask = mpt->m_phy_info[i].phy_mask;
805
806 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
807 dynamic_port = 1;
808 else
809 dynamic_port = 0;
810
811 /*
812 * Update PHY info for smhba
813 */
814 if (mptsas_smhba_phy_init(mpt)) {
815 mutex_exit(&mpt->m_mutex);
816 mptsas_log(mpt, CE_WARN, "mptsas phy update "
817 "failed");
818 return (DDI_FAILURE);
819 }
820
821 mutex_exit(&mpt->m_mutex);
822
823 numphys = 0;
824 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
825 if ((phy_mask >> i) & 0x01) {
826 numphys++;
827 }
828 }
829
830 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
831 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
832 mpt->un.m_base_wwid);
833
834 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
835 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
836 DDI_PROP_SUCCESS) {
837 (void) ddi_prop_remove(DDI_DEV_T_NONE,
838 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
839 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
840 "prop update failed");
841 return (DDI_FAILURE);
842 }
843 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
844 MPTSAS_NUM_PHYS, numphys) !=
845 DDI_PROP_SUCCESS) {
846 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
847 return (DDI_FAILURE);
848 }
849
850 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
851 "phymask", phy_mask) !=
852 DDI_PROP_SUCCESS) {
853 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
854 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
855 "prop update failed");
856 return (DDI_FAILURE);
857 }
858
859 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
860 "dynamic-port", dynamic_port) !=
861 DDI_PROP_SUCCESS) {
862 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
863 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
864 "prop update failed");
865 return (DDI_FAILURE);
866 }
867 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
868 MPTSAS_VIRTUAL_PORT, 0) !=
869 DDI_PROP_SUCCESS) {
870 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
871 MPTSAS_VIRTUAL_PORT);
872 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
873 "prop update failed");
874 return (DDI_FAILURE);
875 }
876 mptsas_smhba_set_phy_props(mpt,
877 iport, dip, numphys, &attached_devhdl);
878
879 mutex_enter(&mpt->m_mutex);
880 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
881 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
882 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
883 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
884 &pdev_hdl, &bay_num, &enclosure);
885 if (rval != DDI_SUCCESS) {
886 mptsas_log(mpt, CE_WARN,
887 "Failed to get device page0 for handle:%d",
888 attached_devhdl);
889 mutex_exit(&mpt->m_mutex);
890 return (DDI_FAILURE);
891 }
892
893 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
894 bzero(phymask, sizeof (phymask));
895 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
896 if (strcmp(phymask, iport) == 0) {
897 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
898 "%x",
899 mpt->m_phy_info[i].phy_mask);
900 }
901 }
902 mutex_exit(&mpt->m_mutex);
903
904 bzero(attached_wwnstr, sizeof (attached_wwnstr));
905 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
906 attached_sas_wwn);
907 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
908 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
909 DDI_PROP_SUCCESS) {
910 (void) ddi_prop_remove(DDI_DEV_T_NONE,
911 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
912 return (DDI_FAILURE);
913 }
914
915 /* Create kstats for each phy on this iport */
916
917 mptsas_create_phy_stats(mpt, iport, dip);
918
919 /*
920 * register sas hba iport with mdi (MPxIO/vhci)
921 */
922 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
923 dip, 0) == MDI_SUCCESS) {
924 mpt->m_mpxio_enable = TRUE;
925 }
926 return (DDI_SUCCESS);
927 }
928
929 /*
930 * Notes:
931 * Set up all device state and allocate data structures,
932 * mutexes, condition variables, etc. for device operation.
933 * Add interrupts needed.
934 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
935 */
936 static int
937 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
938 {
939 mptsas_t *mpt = NULL;
940 int instance, i, j;
941 int doneq_thread_num;
942 char intr_added = 0;
943 char map_setup = 0;
944 char config_setup = 0;
945 char hba_attach_setup = 0;
946 char smp_attach_setup = 0;
947 char mutex_init_done = 0;
948 char event_taskq_create = 0;
949 char dr_taskq_create = 0;
950 char doneq_thread_create = 0;
951 scsi_hba_tran_t *hba_tran;
952 uint_t mem_bar = MEM_SPACE;
953 int rval = DDI_FAILURE;
954
955 /* CONSTCOND */
956 ASSERT(NO_COMPETING_THREADS);
957
958 if (scsi_hba_iport_unit_address(dip)) {
959 return (mptsas_iport_attach(dip, cmd));
960 }
961
962 switch (cmd) {
963 case DDI_ATTACH:
964 break;
965
966 case DDI_RESUME:
967 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
968 return (DDI_FAILURE);
969
970 mpt = TRAN2MPT(hba_tran);
971
972 if (!mpt) {
973 return (DDI_FAILURE);
974 }
975
976 /*
977 * Reset hardware and softc to "no outstanding commands"
978 * Note that a check condition can result on first command
979 * to a target.
980 */
981 mutex_enter(&mpt->m_mutex);
982
983 /*
984 * raise power.
985 */
986 if (mpt->m_options & MPTSAS_OPT_PM) {
987 mutex_exit(&mpt->m_mutex);
988 (void) pm_busy_component(dip, 0);
989 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
990 if (rval == DDI_SUCCESS) {
991 mutex_enter(&mpt->m_mutex);
992 } else {
993 /*
994 * The pm_raise_power() call above failed,
995 * and that can only occur if we were unable
996 * to reset the hardware. This is probably
997 * due to unhealty hardware, and because
998 * important filesystems(such as the root
999 * filesystem) could be on the attached disks,
1000 * it would not be a good idea to continue,
1001 * as we won't be entirely certain we are
1002 * writing correct data. So we panic() here
1003 * to not only prevent possible data corruption,
1004 * but to give developers or end users a hope
1005 * of identifying and correcting any problems.
1006 */
1007 fm_panic("mptsas could not reset hardware "
1008 "during resume");
1009 }
1010 }
1011
1012 mpt->m_suspended = 0;
1013
1014 /*
1015 * Reinitialize ioc
1016 */
1017 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1018 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1019 mutex_exit(&mpt->m_mutex);
1020 if (mpt->m_options & MPTSAS_OPT_PM) {
1021 (void) pm_idle_component(dip, 0);
1022 }
1023 fm_panic("mptsas init chip fail during resume");
1024 }
1025 /*
1026 * mptsas_update_driver_data needs interrupts so enable them
1027 * first.
1028 */
1029 MPTSAS_ENABLE_INTR(mpt);
1030 mptsas_update_driver_data(mpt);
1031
1032 /* start requests, if possible */
1033 mptsas_restart_hba(mpt);
1034
1035 mutex_exit(&mpt->m_mutex);
1036
1037 /*
1038 * Restart watch thread
1039 */
1040 mutex_enter(&mptsas_global_mutex);
1041 if (mptsas_timeout_id == 0) {
1042 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1043 mptsas_tick);
1044 mptsas_timeouts_enabled = 1;
1045 }
1046 mutex_exit(&mptsas_global_mutex);
1047
1048 /* report idle status to pm framework */
1049 if (mpt->m_options & MPTSAS_OPT_PM) {
1050 (void) pm_idle_component(dip, 0);
1051 }
1052
1053 return (DDI_SUCCESS);
1054
1055 default:
1056 return (DDI_FAILURE);
1057
1058 }
1059
1060 instance = ddi_get_instance(dip);
1061
1062 /*
1063 * Allocate softc information.
1064 */
1065 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1066 mptsas_log(NULL, CE_WARN,
1067 "mptsas%d: cannot allocate soft state", instance);
1068 goto fail;
1069 }
1070
1071 mpt = ddi_get_soft_state(mptsas_state, instance);
1072
1073 if (mpt == NULL) {
1074 mptsas_log(NULL, CE_WARN,
1075 "mptsas%d: cannot get soft state", instance);
1076 goto fail;
1077 }
1078
1079 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1080 scsi_size_clean(dip);
1081
1082 mpt->m_dip = dip;
1083 mpt->m_instance = instance;
1084
1085 /* Make a per-instance copy of the structures */
1086 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1087 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1088 mpt->m_reg_acc_attr = mptsas_dev_attr;
1089 mpt->m_dev_acc_attr = mptsas_dev_attr;
1090
1091 /*
1092 * Initialize FMA
1093 */
1094 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1095 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1096 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1097 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1098
1099 mptsas_fm_init(mpt);
1100
1101 if (mptsas_alloc_handshake_msg(mpt,
1102 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1103 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1104 goto fail;
1105 }
1106
1107 /*
1108 * Setup configuration space
1109 */
1110 if (mptsas_config_space_init(mpt) == FALSE) {
1111 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1112 goto fail;
1113 }
1114 config_setup++;
1115
1116 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1117 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1118 mptsas_log(mpt, CE_WARN, "map setup failed");
1119 goto fail;
1120 }
1121 map_setup++;
1122
1123 /*
1124 * A taskq is created for dealing with the event handler
1125 */
1126 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1127 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1128 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1129 goto fail;
1130 }
1131 event_taskq_create++;
1132
1133 /*
1134 * A taskq is created for dealing with dr events
1135 */
1136 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1137 "mptsas_dr_taskq",
1138 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1139 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1140 "failed");
1141 goto fail;
1142 }
1143 dr_taskq_create++;
1144
1145 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1146 0, "mptsas_doneq_thread_threshold_prop", 10);
1147 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1148 0, "mptsas_doneq_length_threshold_prop", 8);
1149 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1150 0, "mptsas_doneq_thread_n_prop", 8);
1151
1152 if (mpt->m_doneq_thread_n) {
1153 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1154 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1155
1156 mutex_enter(&mpt->m_doneq_mutex);
1157 mpt->m_doneq_thread_id =
1158 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1159 * mpt->m_doneq_thread_n, KM_SLEEP);
1160
1161 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1162 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1163 CV_DRIVER, NULL);
1164 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1165 MUTEX_DRIVER, NULL);
1166 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1167 mpt->m_doneq_thread_id[j].flag |=
1168 MPTSAS_DONEQ_THREAD_ACTIVE;
1169 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1170 mpt->m_doneq_thread_id[j].arg.t = j;
1171 mpt->m_doneq_thread_id[j].threadp =
1172 thread_create(NULL, 0, mptsas_doneq_thread,
1173 &mpt->m_doneq_thread_id[j].arg,
1174 0, &p0, TS_RUN, minclsyspri);
1175 mpt->m_doneq_thread_id[j].donetail =
1176 &mpt->m_doneq_thread_id[j].doneq;
1177 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1178 }
1179 mutex_exit(&mpt->m_doneq_mutex);
1180 doneq_thread_create++;
1181 }
1182
1183 /* Initialize mutex used in interrupt handler */
1184 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1185 DDI_INTR_PRI(mpt->m_intr_pri));
1186 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1187 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1188 DDI_INTR_PRI(mpt->m_intr_pri));
1189 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1190 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1191 NULL, MUTEX_DRIVER,
1192 DDI_INTR_PRI(mpt->m_intr_pri));
1193 }
1194
1195 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1196 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1197 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1198 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1199 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1200 mutex_init_done++;
1201
1202 /*
1203 * Disable hardware interrupt since we're not ready to
1204 * handle it yet.
1205 */
1206 MPTSAS_DISABLE_INTR(mpt);
1207 if (mptsas_register_intrs(mpt) == FALSE)
1208 goto fail;
1209 intr_added++;
1210
1211 mutex_enter(&mpt->m_mutex);
1212 /*
1213 * Initialize power management component
1214 */
1215 if (mpt->m_options & MPTSAS_OPT_PM) {
1216 if (mptsas_init_pm(mpt)) {
1217 mutex_exit(&mpt->m_mutex);
1218 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1219 "failed");
1220 goto fail;
1221 }
1222 }
1223
1224 /*
1225 * Initialize chip using Message Unit Reset, if allowed
1226 */
1227 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1228 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1229 mutex_exit(&mpt->m_mutex);
1230 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1231 goto fail;
1232 }
1233
1234 /*
1235 * Fill in the phy_info structure and get the base WWID
1236 */
1237 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1238 mptsas_log(mpt, CE_WARN,
1239 "mptsas_get_manufacture_page5 failed!");
1240 goto fail;
1241 }
1242
1243 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1244 mptsas_log(mpt, CE_WARN,
1245 "mptsas_get_sas_io_unit_page_hndshk failed!");
1246 goto fail;
1247 }
1248
1249 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1250 mptsas_log(mpt, CE_WARN,
1251 "mptsas_get_manufacture_page0 failed!");
1252 goto fail;
1253 }
1254
1255 mutex_exit(&mpt->m_mutex);
1256
1257 /*
1258 * Register the iport for multiple port HBA
1259 */
1260 mptsas_iport_register(mpt);
1261
1262 /*
1263 * initialize SCSI HBA transport structure
1264 */
1265 if (mptsas_hba_setup(mpt) == FALSE)
1266 goto fail;
1267 hba_attach_setup++;
1268
1269 if (mptsas_smp_setup(mpt) == FALSE)
1270 goto fail;
1271 smp_attach_setup++;
1272
1273 if (mptsas_cache_create(mpt) == FALSE)
1274 goto fail;
1275
1276 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1277 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1278 if (mpt->m_scsi_reset_delay == 0) {
1279 mptsas_log(mpt, CE_NOTE,
1280 "scsi_reset_delay of 0 is not recommended,"
1281 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1282 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1283 }
1284
1285 /*
1286 * Initialize the wait and done FIFO queue
1287 */
1288 mpt->m_donetail = &mpt->m_doneq;
1289 mpt->m_waitqtail = &mpt->m_waitq;
1290 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1291 mpt->m_tx_draining = 0;
1292
1293 /*
1294 * ioc cmd queue initialize
1295 */
1296 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1297 mpt->m_dev_handle = 0xFFFF;
1298
1299 MPTSAS_ENABLE_INTR(mpt);
1300
1301 /*
1302 * enable event notification
1303 */
1304 mutex_enter(&mpt->m_mutex);
1305 if (mptsas_ioc_enable_event_notification(mpt)) {
1306 mutex_exit(&mpt->m_mutex);
1307 goto fail;
1308 }
1309 mutex_exit(&mpt->m_mutex);
1310
1311 /*
1312 * Initialize PHY info for smhba
1313 */
1314 if (mptsas_smhba_setup(mpt)) {
1315 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1316 "failed");
1317 goto fail;
1318 }
1319
1320 /* Check all dma handles allocated in attach */
1321 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1322 != DDI_SUCCESS) ||
1323 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1324 != DDI_SUCCESS) ||
1325 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1326 != DDI_SUCCESS) ||
1327 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1328 != DDI_SUCCESS) ||
1329 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1330 != DDI_SUCCESS)) {
1331 goto fail;
1332 }
1333
1334 /* Check all acc handles allocated in attach */
1335 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1336 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1337 != DDI_SUCCESS) ||
1338 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1339 != DDI_SUCCESS) ||
1340 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1341 != DDI_SUCCESS) ||
1342 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1343 != DDI_SUCCESS) ||
1344 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1345 != DDI_SUCCESS) ||
1346 (mptsas_check_acc_handle(mpt->m_config_handle)
1347 != DDI_SUCCESS)) {
1348 goto fail;
1349 }
1350
1351 /*
1352 * After this point, we are not going to fail the attach.
1353 */
1354 /*
1355 * used for mptsas_watch
1356 */
1357 mptsas_list_add(mpt);
1358
1359 mutex_enter(&mptsas_global_mutex);
1360 if (mptsas_timeouts_enabled == 0) {
1361 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1362 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1363
1364 mptsas_tick = mptsas_scsi_watchdog_tick *
1365 drv_usectohz((clock_t)1000000);
1366
1367 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1368 mptsas_timeouts_enabled = 1;
1369 }
1370 mutex_exit(&mptsas_global_mutex);
1371
1372 /* Print message of HBA present */
1373 ddi_report_dev(dip);
1374
1375 /* report idle status to pm framework */
1376 if (mpt->m_options & MPTSAS_OPT_PM) {
1377 (void) pm_idle_component(dip, 0);
1378 }
1379
1380 return (DDI_SUCCESS);
1381
1382 fail:
1383 mptsas_log(mpt, CE_WARN, "attach failed");
1384 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1385 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1386 if (mpt) {
1387 mutex_enter(&mptsas_global_mutex);
1388
1389 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1390 timeout_id_t tid = mptsas_timeout_id;
1391 mptsas_timeouts_enabled = 0;
1392 mptsas_timeout_id = 0;
1393 mutex_exit(&mptsas_global_mutex);
1394 (void) untimeout(tid);
1395 mutex_enter(&mptsas_global_mutex);
1396 }
1397 mutex_exit(&mptsas_global_mutex);
1398 /* deallocate in reverse order */
1399 mptsas_cache_destroy(mpt);
1400
1401 if (smp_attach_setup) {
1402 mptsas_smp_teardown(mpt);
1403 }
1404 if (hba_attach_setup) {
1405 mptsas_hba_teardown(mpt);
1406 }
1407
1408 if (mpt->m_active) {
1409 mptsas_hash_uninit(&mpt->m_active->m_smptbl,
1410 sizeof (mptsas_smp_t));
1411 mptsas_hash_uninit(&mpt->m_active->m_tgttbl,
1412 sizeof (mptsas_target_t));
1413 mptsas_free_active_slots(mpt);
1414 }
1415 if (intr_added) {
1416 mptsas_unregister_intrs(mpt);
1417 }
1418
1419 if (doneq_thread_create) {
1420 mutex_enter(&mpt->m_doneq_mutex);
1421 doneq_thread_num = mpt->m_doneq_thread_n;
1422 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1423 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1424 mpt->m_doneq_thread_id[j].flag &=
1425 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1426 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1427 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1428 }
1429 while (mpt->m_doneq_thread_n) {
1430 cv_wait(&mpt->m_doneq_thread_cv,
1431 &mpt->m_doneq_mutex);
1432 }
1433 for (j = 0; j < doneq_thread_num; j++) {
1434 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1435 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1436 }
1437 kmem_free(mpt->m_doneq_thread_id,
1438 sizeof (mptsas_doneq_thread_list_t)
1439 * doneq_thread_num);
1440 mutex_exit(&mpt->m_doneq_mutex);
1441 cv_destroy(&mpt->m_doneq_thread_cv);
1442 mutex_destroy(&mpt->m_doneq_mutex);
1443 }
1444 if (event_taskq_create) {
1445 ddi_taskq_destroy(mpt->m_event_taskq);
1446 }
1447 if (dr_taskq_create) {
1448 ddi_taskq_destroy(mpt->m_dr_taskq);
1449 }
1450 if (mutex_init_done) {
1451 mutex_destroy(&mpt->m_tx_waitq_mutex);
1452 mutex_destroy(&mpt->m_passthru_mutex);
1453 mutex_destroy(&mpt->m_mutex);
1454 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1455 mutex_destroy(
1456 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1457 }
1458 cv_destroy(&mpt->m_cv);
1459 cv_destroy(&mpt->m_passthru_cv);
1460 cv_destroy(&mpt->m_fw_cv);
1461 cv_destroy(&mpt->m_config_cv);
1462 cv_destroy(&mpt->m_fw_diag_cv);
1463 }
1464
1465 if (map_setup) {
1466 mptsas_cfg_fini(mpt);
1467 }
1468 if (config_setup) {
1469 mptsas_config_space_fini(mpt);
1470 }
1471 mptsas_free_handshake_msg(mpt);
1472 mptsas_hba_fini(mpt);
1473
1474 mptsas_fm_fini(mpt);
1475 ddi_soft_state_free(mptsas_state, instance);
1476 ddi_prop_remove_all(dip);
1477 }
1478 return (DDI_FAILURE);
1479 }
1480
/*
 * DDI_SUSPEND handler: quiesce one controller instance.  Cancels this
 * instance's timeouts, stops the global watch threads once every
 * instance is suspended, syncs IR metadata via a RAID action system
 * shutdown, and drains the taskqs.  Always returns DDI_SUCCESS.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* iport nodes carry no per-instance state to suspend */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* m_suspended is a count; only the first suspend does the work */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	/*
	 * The mutex is dropped around untimeout() because the timeout
	 * handler may itself need m_mutex; holding it here could deadlock.
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	/* g != NULL after the scan means some instance is still active */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1587
1588 #ifdef __sparc
1589 /*ARGSUSED*/
1590 static int
1591 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1592 {
1593 mptsas_t *mpt;
1594 scsi_hba_tran_t *tran;
1595
1596 /*
1597 * If this call is for iport, just return.
1598 */
1599 if (scsi_hba_iport_unit_address(devi))
1600 return (DDI_SUCCESS);
1601
1602 if ((tran = ddi_get_driver_private(devi)) == NULL)
1603 return (DDI_SUCCESS);
1604
1605 if ((mpt = TRAN2MPT(tran)) == NULL)
1606 return (DDI_SUCCESS);
1607
1608 /*
1609 * Send RAID action system shutdown to sync IR. Disable HBA
1610 * interrupts in hardware first.
1611 */
1612 MPTSAS_DISABLE_INTR(mpt);
1613 mptsas_raid_action_system_shutdown(mpt);
1614
1615 return (DDI_SUCCESS);
1616 }
1617 #else /* __sparc */
1618 /*
1619 * quiesce(9E) entry point.
1620 *
1621 * This function is called when the system is single-threaded at high
1622 * PIL with preemption disabled. Therefore, this function must not be
1623 * blocked.
1624 *
1625 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1626 * DDI_FAILURE indicates an error condition and should almost never happen.
1627 */
1628 static int
1629 mptsas_quiesce(dev_info_t *devi)
1630 {
1631 mptsas_t *mpt;
1632 scsi_hba_tran_t *tran;
1633
1634 /*
1635 * If this call is for iport, just return.
1636 */
1637 if (scsi_hba_iport_unit_address(devi))
1638 return (DDI_SUCCESS);
1639
1640 if ((tran = ddi_get_driver_private(devi)) == NULL)
1641 return (DDI_SUCCESS);
1642
1643 if ((mpt = TRAN2MPT(tran)) == NULL)
1644 return (DDI_SUCCESS);
1645
1646 /* Disable HBA interrupts in hardware */
1647 MPTSAS_DISABLE_INTR(mpt);
1648 /* Send RAID action system shutdonw to sync IR */
1649 mptsas_raid_action_system_shutdown(mpt);
1650
1651 return (DDI_SUCCESS);
1652 }
1653 #endif /* __sparc */
1654
1655 /*
1656 * detach(9E). Remove all device allocations and system resources;
1657 * disable device interrupts.
1658 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1659 */
1660 static int
1661 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1662 {
1663 /* CONSTCOND */
1664 ASSERT(NO_COMPETING_THREADS);
1665 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1666
1667 switch (cmd) {
1668 case DDI_DETACH:
1669 return (mptsas_do_detach(devi));
1670
1671 case DDI_SUSPEND:
1672 return (mptsas_suspend(devi));
1673
1674 default:
1675 return (DDI_FAILURE);
1676 }
1677 /* NOTREACHED */
1678 }
1679
/*
 * Core DDI_DETACH work: tear down one controller (or iport) instance.
 * For iport nodes, frees MPxIO path info and unregisters the phci.
 * For the HBA node, quiesces the chip, reaps the done-queue threads,
 * cancels timeouts, and releases all resources acquired in attach in
 * reverse order.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* free every client path; any busy path blocks detach */
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR.  After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	if (mpt->m_doneq_thread_n) {
		/*
		 * Clear each thread's ACTIVE flag, wake it, and wait for
		 * all of them to decrement m_doneq_thread_n on exit before
		 * destroying their synchronization objects.
		 */
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 */
	/* drop m_mutex around untimeout() to avoid deadlocking the handler */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enabled so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	/*
	 * Delete m_active (target and SMP hash tables, slot array).
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_hash_uninit(&mpt->m_active->m_tgttbl, sizeof (mptsas_target_t));
	mptsas_hash_uninit(&mpt->m_active->m_smptbl, sizeof (mptsas_smp_t));
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
1895
1896 static void
1897 mptsas_list_add(mptsas_t *mpt)
1898 {
1899 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1900
1901 if (mptsas_head == NULL) {
1902 mptsas_head = mpt;
1903 } else {
1904 mptsas_tail->m_next = mpt;
1905 }
1906 mptsas_tail = mpt;
1907 rw_exit(&mptsas_global_rwlock);
1908 }
1909
1910 static void
1911 mptsas_list_del(mptsas_t *mpt)
1912 {
1913 mptsas_t *m;
1914 /*
1915 * Remove device instance from the global linked list
1916 */
1917 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1918 if (mptsas_head == mpt) {
1919 m = mptsas_head = mpt->m_next;
1920 } else {
1921 for (m = mptsas_head; m != NULL; m = m->m_next) {
1922 if (m->m_next == mpt) {
1923 m->m_next = mpt->m_next;
1924 break;
1925 }
1926 }
1927 if (m == NULL) {
1928 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
1929 }
1930 }
1931
1932 if (mptsas_tail == mpt) {
1933 mptsas_tail = m;
1934 }
1935 rw_exit(&mptsas_global_rwlock);
1936 }
1937
1938 static int
1939 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
1940 {
1941 ddi_dma_attr_t task_dma_attrs;
1942
1943 task_dma_attrs = mpt->m_msg_dma_attr;
1944 task_dma_attrs.dma_attr_sgllen = 1;
1945 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
1946
1947 /* allocate Task Management ddi_dma resources */
1948 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
1949 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
1950 alloc_size, NULL) == FALSE) {
1951 return (DDI_FAILURE);
1952 }
1953 mpt->m_hshk_dma_size = alloc_size;
1954
1955 return (DDI_SUCCESS);
1956 }
1957
/*
 * Release the handshake DMA buffer allocated by
 * mptsas_alloc_handshake_msg() and reset the recorded size.
 */
static void
mptsas_free_handshake_msg(mptsas_t *mpt)
{
	mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
	mpt->m_hshk_dma_size = 0;
}
1964
/*
 * Allocate and attach the SCSA transport vector for this HBA instance.
 *
 * Fills in the tran entry points, registers the HBA with the SCSA
 * framework and records the transport in mpt->m_tran.  Returns TRUE on
 * success, FALSE if scsi_hba_attach_setup() fails (the transport is
 * freed and m_tran cleared in that case).
 */
static int
mptsas_hba_setup(mptsas_t *mpt)
{
	scsi_hba_tran_t		*hba_tran;
	int			tran_flags;

	/* Allocate a transport structure */
	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
	    SCSI_HBA_CANSLEEP);
	ASSERT(mpt->m_tran != NULL);

	hba_tran->tran_hba_private	= mpt;
	hba_tran->tran_tgt_private	= NULL;

	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;

	hba_tran->tran_start		= mptsas_scsi_start;
	hba_tran->tran_reset		= mptsas_scsi_reset;
	hba_tran->tran_abort		= mptsas_scsi_abort;
	hba_tran->tran_getcap		= mptsas_scsi_getcap;
	hba_tran->tran_setcap		= mptsas_scsi_setcap;
	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;

	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;

	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
	hba_tran->tran_get_name		= mptsas_get_name;

	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
	/* No bus reset support; NULL entry points are legal for these. */
	hba_tran->tran_bus_reset	= NULL;

	hba_tran->tran_add_eventcall	= NULL;
	hba_tran->tran_get_eventcookie	= NULL;
	hba_tran->tran_post_event	= NULL;
	hba_tran->tran_remove_eventcall	= NULL;

	hba_tran->tran_bus_config	= mptsas_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	/*
	 * All children of the HBA node are iports.  Pass the
	 * SCSI_HBA_TRAN_CLONE flag so each target gets a cloned copy of
	 * this tran vector; the flag is inherited by the iports' trans.
	 */
	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);

	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
	    hba_tran, tran_flags) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
		scsi_hba_tran_free(hba_tran);
		mpt->m_tran = NULL;
		return (FALSE);
	}
	return (TRUE);
}
2026
2027 static void
2028 mptsas_hba_teardown(mptsas_t *mpt)
2029 {
2030 (void) scsi_hba_detach(mpt->m_dip);
2031 if (mpt->m_tran != NULL) {
2032 scsi_hba_tran_free(mpt->m_tran);
2033 mpt->m_tran = NULL;
2034 }
2035 }
2036
/*
 * Register one iport per SAS port (group of phys sharing a port number)
 * that has an attached device, plus a virtual iport "v0" for RAID
 * volumes.  The iport name is the phy mask rendered in hex.
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int		i, j;
	/* Running union of all phys already assigned to an iport. */
	mptsas_phymask_t mask = 0x0;
	/*
	 * initial value of mask is 0
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* Skip phys with nothing attached. */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* Already covered by a previously registered iport. */
		if ((mask & (1 << i)) != 0)
			continue;

		/*
		 * Collect every attached phy belonging to the same port
		 * into this iport's phy mask.
		 */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		/* Record the mask on each member phy. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		(void) sprintf(phy_mask_name, "%x", phy_mask);

		/*
		 * Drop the instance mutex around the framework call;
		 * NOTE(review): presumably scsi_hba_iport_register() may
		 * block or re-enter the driver -- confirm before changing.
		 */
		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2091
2092 static int
2093 mptsas_smp_setup(mptsas_t *mpt)
2094 {
2095 mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
2096 ASSERT(mpt->m_smptran != NULL);
2097 mpt->m_smptran->smp_tran_hba_private = mpt;
2098 mpt->m_smptran->smp_tran_start = mptsas_smp_start;
2099 if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
2100 mptsas_log(mpt, CE_WARN, "smp attach setup failed");
2101 smp_hba_tran_free(mpt->m_smptran);
2102 mpt->m_smptran = NULL;
2103 return (FALSE);
2104 }
2105 /*
2106 * Initialize smp hash table
2107 */
2108 mptsas_hash_init(&mpt->m_active->m_smptbl);
2109 mpt->m_smp_devhdl = 0xFFFF;
2110
2111 return (TRUE);
2112 }
2113
2114 static void
2115 mptsas_smp_teardown(mptsas_t *mpt)
2116 {
2117 (void) smp_hba_detach(mpt->m_dip);
2118 if (mpt->m_smptran != NULL) {
2119 smp_hba_tran_free(mpt->m_smptran);
2120 mpt->m_smptran = NULL;
2121 }
2122 mpt->m_smp_devhdl = 0;
2123 }
2124
2125 static int
2126 mptsas_cache_create(mptsas_t *mpt)
2127 {
2128 int instance = mpt->m_instance;
2129 char buf[64];
2130
2131 /*
2132 * create kmem cache for packets
2133 */
2134 (void) sprintf(buf, "mptsas%d_cache", instance);
2135 mpt->m_kmem_cache = kmem_cache_create(buf,
2136 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2137 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2138 NULL, (void *)mpt, NULL, 0);
2139
2140 if (mpt->m_kmem_cache == NULL) {
2141 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2142 return (FALSE);
2143 }
2144
2145 /*
2146 * create kmem cache for extra SGL frames if SGL cannot
2147 * be accomodated into main request frame.
2148 */
2149 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2150 mpt->m_cache_frames = kmem_cache_create(buf,
2151 sizeof (mptsas_cache_frames_t), 8,
2152 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2153 NULL, (void *)mpt, NULL, 0);
2154
2155 if (mpt->m_cache_frames == NULL) {
2156 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2157 return (FALSE);
2158 }
2159
2160 return (TRUE);
2161 }
2162
/*
 * Destroy the kmem caches created by mptsas_cache_create().  Safe to
 * call after a partial setup: each cache is only destroyed if present.
 */
static void
mptsas_cache_destroy(mptsas_t *mpt)
{
	/* deallocate in reverse order */
	if (mpt->m_cache_frames) {
		kmem_cache_destroy(mpt->m_cache_frames);
		mpt->m_cache_frames = NULL;
	}
	if (mpt->m_kmem_cache) {
		kmem_cache_destroy(mpt->m_kmem_cache);
		mpt->m_kmem_cache = NULL;
	}
}
2176
/*
 * power(9E) entry point.
 *
 * Change the power level of the HBA.  Only the HBA node itself is
 * power-managed; requests against iport children (which have a unit
 * address) succeed trivially.  Lowering power is refused while the
 * device is busy.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t	*mpt;
	int		rval = DDI_SUCCESS;
	int		polls = 0;
	uint32_t	ioc_status;

	/* iport nodes have a unit address; nothing to do for them. */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset
		 * (3000 polls of 10ms each).
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2248
2249 /*
2250 * Initialize configuration space and figure out which
2251 * chip and revison of the chip the mpt driver is using.
2252 */
2253 static int
2254 mptsas_config_space_init(mptsas_t *mpt)
2255 {
2256 NDBG0(("mptsas_config_space_init"));
2257
2258 if (mpt->m_config_handle != NULL)
2259 return (TRUE);
2260
2261 if (pci_config_setup(mpt->m_dip,
2262 &mpt->m_config_handle) != DDI_SUCCESS) {
2263 mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
2264 return (FALSE);
2265 }
2266
2267 /*
2268 * This is a workaround for a XMITS ASIC bug which does not
2269 * drive the CBE upper bits.
2270 */
2271 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
2272 PCI_STAT_PERROR) {
2273 pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
2274 PCI_STAT_PERROR);
2275 }
2276
2277 mptsas_setup_cmd_reg(mpt);
2278
2279 /*
2280 * Get the chip device id:
2281 */
2282 mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);
2283
2284 /*
2285 * Save the revision.
2286 */
2287 mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);
2288
2289 /*
2290 * Save the SubSystem Vendor and Device IDs
2291 */
2292 mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
2293 mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);
2294
2295 /*
2296 * Set the latency timer to 0x40 as specified by the upa -> pci
2297 * bridge chip design team. This may be done by the sparc pci
2298 * bus nexus driver, but the driver should make sure the latency
2299 * timer is correct for performance reasons.
2300 */
2301 pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
2302 MPTSAS_LATENCY_TIMER);
2303
2304 (void) mptsas_get_pci_cap(mpt);
2305 return (TRUE);
2306 }
2307
/*
 * Unmap PCI configuration space.  Bus mastering is disabled first so
 * the chip cannot DMA after the handle is gone.
 */
static void
mptsas_config_space_fini(mptsas_t *mpt)
{
	if (mpt->m_config_handle != NULL) {
		mptsas_disable_bus_master(mpt);
		pci_config_teardown(&mpt->m_config_handle);
		mpt->m_config_handle = NULL;
	}
}
2317
2318 static void
2319 mptsas_setup_cmd_reg(mptsas_t *mpt)
2320 {
2321 ushort_t cmdreg;
2322
2323 /*
2324 * Set the command register to the needed values.
2325 */
2326 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2327 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2328 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2329 cmdreg &= ~PCI_COMM_IO;
2330 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2331 }
2332
2333 static void
2334 mptsas_disable_bus_master(mptsas_t *mpt)
2335 {
2336 ushort_t cmdreg;
2337
2338 /*
2339 * Clear the master enable bit in the PCI command register.
2340 * This prevents any bus mastering activity like DMA.
2341 */
2342 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2343 cmdreg &= ~PCI_COMM_ME;
2344 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2345 }
2346
2347 int
2348 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2349 {
2350 ddi_dma_attr_t attrs;
2351
2352 attrs = mpt->m_io_dma_attr;
2353 attrs.dma_attr_sgllen = 1;
2354
2355 ASSERT(dma_statep != NULL);
2356
2357 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2358 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2359 &dma_statep->cookie) == FALSE) {
2360 return (DDI_FAILURE);
2361 }
2362
2363 return (DDI_SUCCESS);
2364 }
2365
/*
 * Free a DMA buffer allocated by mptsas_dma_alloc() and mark the state
 * empty by zeroing its size.
 */
void
mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
{
	ASSERT(dma_statep != NULL);
	mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
	dma_statep->size = 0;
}
2373
/*
 * Allocate a temporary DMA buffer of 'size' bytes, run 'callback' with
 * the buffer mapped, then free it.  'var' is passed through to the
 * callback unchanged.  The FM handle checks run after the callback
 * (and before teardown) so a DMA or access fault during the callback
 * is detected and converted into DDI_FAILURE.
 *
 * Must be called with the instance mutex held.  Returns the callback's
 * result, or DDI_FAILURE on allocation or FM-check failure.
 */
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t	attrs;
	ddi_dma_handle_t dma_handle;
	caddr_t		memp;
	ddi_acc_handle_t accessp;
	int		rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Single-cookie buffer with granularity equal to the full size. */
	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	attrs.dma_attr_granular = size;

	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	rval = (*callback) (mpt, memp, var, accessp);

	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}
2406
2407 static int
2408 mptsas_alloc_request_frames(mptsas_t *mpt)
2409 {
2410 ddi_dma_attr_t frame_dma_attrs;
2411 caddr_t memp;
2412 ddi_dma_cookie_t cookie;
2413 size_t mem_size;
2414
2415 /*
2416 * re-alloc when it has already alloced
2417 */
2418 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2419 &mpt->m_acc_req_frame_hdl);
2420
2421 /*
2422 * The size of the request frame pool is:
2423 * Number of Request Frames * Request Frame Size
2424 */
2425 mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2426
2427 /*
2428 * set the DMA attributes. System Request Message Frames must be
2429 * aligned on a 16-byte boundry.
2430 */
2431 frame_dma_attrs = mpt->m_msg_dma_attr;
2432 frame_dma_attrs.dma_attr_align = 16;
2433 frame_dma_attrs.dma_attr_sgllen = 1;
2434
2435 /*
2436 * allocate the request frame pool.
2437 */
2438 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2439 &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2440 mem_size, &cookie) == FALSE) {
2441 return (DDI_FAILURE);
2442 }
2443
2444 /*
2445 * Store the request frame memory address. This chip uses this
2446 * address to dma to and from the driver's frame. The second
2447 * address is the address mpt uses to fill in the frame.
2448 */
2449 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2450 mpt->m_req_frame = memp;
2451
2452 /*
2453 * Clear the request frame pool.
2454 */
2455 bzero(mpt->m_req_frame, mem_size);
2456
2457 return (DDI_SUCCESS);
2458 }
2459
2460 static int
2461 mptsas_alloc_reply_frames(mptsas_t *mpt)
2462 {
2463 ddi_dma_attr_t frame_dma_attrs;
2464 caddr_t memp;
2465 ddi_dma_cookie_t cookie;
2466 size_t mem_size;
2467
2468 /*
2469 * re-alloc when it has already alloced
2470 */
2471 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2472 &mpt->m_acc_reply_frame_hdl);
2473
2474 /*
2475 * The size of the reply frame pool is:
2476 * Number of Reply Frames * Reply Frame Size
2477 */
2478 mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2479
2480 /*
2481 * set the DMA attributes. System Reply Message Frames must be
2482 * aligned on a 4-byte boundry. This is the default.
2483 */
2484 frame_dma_attrs = mpt->m_msg_dma_attr;
2485 frame_dma_attrs.dma_attr_sgllen = 1;
2486
2487 /*
2488 * allocate the reply frame pool
2489 */
2490 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2491 &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2492 mem_size, &cookie) == FALSE) {
2493 return (DDI_FAILURE);
2494 }
2495
2496 /*
2497 * Store the reply frame memory address. This chip uses this
2498 * address to dma to and from the driver's frame. The second
2499 * address is the address mpt uses to process the frame.
2500 */
2501 mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2502 mpt->m_reply_frame = memp;
2503
2504 /*
2505 * Clear the reply frame pool.
2506 */
2507 bzero(mpt->m_reply_frame, mem_size);
2508
2509 return (DDI_SUCCESS);
2510 }
2511
2512 static int
2513 mptsas_alloc_free_queue(mptsas_t *mpt)
2514 {
2515 ddi_dma_attr_t frame_dma_attrs;
2516 caddr_t memp;
2517 ddi_dma_cookie_t cookie;
2518 size_t mem_size;
2519
2520 /*
2521 * re-alloc when it has already alloced
2522 */
2523 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2524 &mpt->m_acc_free_queue_hdl);
2525
2526 /*
2527 * The reply free queue size is:
2528 * Reply Free Queue Depth * 4
2529 * The "4" is the size of one 32 bit address (low part of 64-bit
2530 * address)
2531 */
2532 mem_size = mpt->m_free_queue_depth * 4;
2533
2534 /*
2535 * set the DMA attributes The Reply Free Queue must be aligned on a
2536 * 16-byte boundry.
2537 */
2538 frame_dma_attrs = mpt->m_msg_dma_attr;
2539 frame_dma_attrs.dma_attr_align = 16;
2540 frame_dma_attrs.dma_attr_sgllen = 1;
2541
2542 /*
2543 * allocate the reply free queue
2544 */
2545 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2546 &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2547 mem_size, &cookie) == FALSE) {
2548 return (DDI_FAILURE);
2549 }
2550
2551 /*
2552 * Store the reply free queue memory address. This chip uses this
2553 * address to read from the reply free queue. The second address
2554 * is the address mpt uses to manage the queue.
2555 */
2556 mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2557 mpt->m_free_queue = memp;
2558
2559 /*
2560 * Clear the reply free queue memory.
2561 */
2562 bzero(mpt->m_free_queue, mem_size);
2563
2564 return (DDI_SUCCESS);
2565 }
2566
2567 static int
2568 mptsas_alloc_post_queue(mptsas_t *mpt)
2569 {
2570 ddi_dma_attr_t frame_dma_attrs;
2571 caddr_t memp;
2572 ddi_dma_cookie_t cookie;
2573 size_t mem_size;
2574
2575 /*
2576 * re-alloc when it has already alloced
2577 */
2578 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2579 &mpt->m_acc_post_queue_hdl);
2580
2581 /*
2582 * The reply descriptor post queue size is:
2583 * Reply Descriptor Post Queue Depth * 8
2584 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2585 */
2586 mem_size = mpt->m_post_queue_depth * 8;
2587
2588 /*
2589 * set the DMA attributes. The Reply Descriptor Post Queue must be
2590 * aligned on a 16-byte boundry.
2591 */
2592 frame_dma_attrs = mpt->m_msg_dma_attr;
2593 frame_dma_attrs.dma_attr_align = 16;
2594 frame_dma_attrs.dma_attr_sgllen = 1;
2595
2596 /*
2597 * allocate the reply post queue
2598 */
2599 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2600 &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2601 mem_size, &cookie) == FALSE) {
2602 return (DDI_FAILURE);
2603 }
2604
2605 /*
2606 * Store the reply descriptor post queue memory address. This chip
2607 * uses this address to write to the reply descriptor post queue. The
2608 * second address is the address mpt uses to manage the queue.
2609 */
2610 mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2611 mpt->m_post_queue = memp;
2612
2613 /*
2614 * Clear the reply post queue memory.
2615 */
2616 bzero(mpt->m_post_queue, mem_size);
2617
2618 return (DDI_SUCCESS);
2619 }
2620
2621 static void
2622 mptsas_alloc_reply_args(mptsas_t *mpt)
2623 {
2624 if (mpt->m_replyh_args == NULL) {
2625 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2626 mpt->m_max_replies, KM_SLEEP);
2627 }
2628 }
2629
2630 static int
2631 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2632 {
2633 mptsas_cache_frames_t *frames = NULL;
2634 if (cmd->cmd_extra_frames == NULL) {
2635 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2636 if (frames == NULL) {
2637 return (DDI_FAILURE);
2638 }
2639 cmd->cmd_extra_frames = frames;
2640 }
2641 return (DDI_SUCCESS);
2642 }
2643
2644 static void
2645 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2646 {
2647 if (cmd->cmd_extra_frames) {
2648 kmem_cache_free(mpt->m_cache_frames,
2649 (void *)cmd->cmd_extra_frames);
2650 cmd->cmd_extra_frames = NULL;
2651 }
2652 }
2653
/*
 * Unmap the device register space mapped at attach time.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2660
/*
 * Release all per-instance DMA pools/queues and the reply-handler
 * argument array.  The mptsas_dma_addr_destroy() calls are safe on
 * handles that were never allocated or were already destroyed.
 */
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Free up any allocated memory
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}
2686
/*
 * Build the unit-address string for a child node into 'name' (of size
 * 'len').  The address is "pPHY,LUN" for direct-attached SATA devices
 * (identified by the "sata-phy" property) or "wWWN,LUN"-style
 * "WWN,LUN" using the target-port property otherwise.  Returns
 * DDI_SUCCESS, or DDI_FAILURE if neither property is present.
 */
static int
mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
{
	int		lun = 0;
	char		*sas_wwn = NULL;
	int		phynum = -1;
	int		reallen = 0;

	/* Fetch the LUN number property (defaults to 0). */
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
	    LUN_PROP, 0);

	if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
		/*
		 * Stick in the address of form "pPHY,LUN"
		 */
		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
	    == DDI_PROP_SUCCESS) {
		/*
		 * Stick in the address of the form "wWWN,LUN"
		 */
		reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
		ddi_prop_free(sas_wwn);
	} else {
		return (DDI_FAILURE);
	}

	/* Truncation is only logged; the truncated name is still used. */
	ASSERT(reallen < len);
	if (reallen >= len) {
		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
		    "length too small, it needs to be %d bytes", reallen + 1);
	}
	return (DDI_SUCCESS);
}
2724
2725 /*
2726 * tran_tgt_init(9E) - target device instance initialization
2727 */
2728 static int
2729 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2730 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2731 {
2732 #ifndef __lock_lint
2733 _NOTE(ARGUNUSED(hba_tran))
2734 #endif
2735
2736 /*
2737 * At this point, the scsi_device structure already exists
2738 * and has been initialized.
2739 *
2740 * Use this function to allocate target-private data structures,
2741 * if needed by this HBA. Add revised flow-control and queue
2742 * properties for child here, if desired and if you can tell they
2743 * support tagged queueing by now.
2744 */
2745 mptsas_t *mpt;
2746 int lun = sd->sd_address.a_lun;
2747 mdi_pathinfo_t *pip = NULL;
2748 mptsas_tgt_private_t *tgt_private = NULL;
2749 mptsas_target_t *ptgt = NULL;
2750 char *psas_wwn = NULL;
2751 int phymask = 0;
2752 uint64_t sas_wwn = 0;
2753 mpt = SDEV2MPT(sd);
2754
2755 ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
2756
2757 NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
2758 (void *)hba_dip, (void *)tgt_dip, lun));
2759
2760 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
2761 (void) ndi_merge_node(tgt_dip, mptsas_name_child);
2762 ddi_set_name_addr(tgt_dip, NULL);
2763 return (DDI_FAILURE);
2764 }
2765 /*
2766 * phymask is 0 means the virtual port for RAID
2767 */
2768 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
2769 "phymask", 0);
2770 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
2771 if ((pip = (void *)(sd->sd_private)) == NULL) {
2772 /*
2773 * Very bad news if this occurs. Somehow scsi_vhci has
2774 * lost the pathinfo node for this target.
2775 */
2776 return (DDI_NOT_WELL_FORMED);
2777 }
2778
2779 if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
2780 DDI_PROP_SUCCESS) {
2781 mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
2782 return (DDI_FAILURE);
2783 }
2784
2785 if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
2786 &psas_wwn) == MDI_SUCCESS) {
2787 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
2788 sas_wwn = 0;
2789 }
2790 (void) mdi_prop_free(psas_wwn);
2791 }
2792 } else {
2793 lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
2794 DDI_PROP_DONTPASS, LUN_PROP, 0);
2795 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
2796 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
2797 DDI_PROP_SUCCESS) {
2798 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
2799 sas_wwn = 0;
2800 }
2801 ddi_prop_free(psas_wwn);
2802 } else {
2803 sas_wwn = 0;
2804 }
2805 }
2806 ASSERT((sas_wwn != 0) || (phymask != 0));
2807 mutex_enter(&mpt->m_mutex);
2808 ptgt = mptsas_hash_search(&mpt->m_active->m_tgttbl, sas_wwn, phymask);
2809 mutex_exit(&mpt->m_mutex);
2810 if (ptgt == NULL) {
2811 mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
2812 "gone already! phymask:%x, saswwn %"PRIx64, phymask,
2813 sas_wwn);
2814 return (DDI_FAILURE);
2815 }
2816 if (hba_tran->tran_tgt_private == NULL) {
2817 tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
2818 KM_SLEEP);
2819 tgt_private->t_lun = lun;
2820 tgt_private->t_private = ptgt;
2821 hba_tran->tran_tgt_private = tgt_private;
2822 }
2823
2824 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
2825 return (DDI_SUCCESS);
2826 }
2827 mutex_enter(&mpt->m_mutex);
2828
2829 if (ptgt->m_deviceinfo &
2830 (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
2831 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
2832 uchar_t *inq89 = NULL;
2833 int inq89_len = 0x238;
2834 int reallen = 0;
2835 int rval = 0;
2836 struct sata_id *sid = NULL;
2837 char model[SATA_ID_MODEL_LEN + 1];
2838 char fw[SATA_ID_FW_LEN + 1];
2839 char *vid, *pid;
2840 int i;
2841
2842 mutex_exit(&mpt->m_mutex);
2843 /*
2844 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
2845 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
2846 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
2847 */
2848 inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
2849 rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
2850 inq89, inq89_len, &reallen, 1);
2851
2852 if (rval != 0) {
2853 if (inq89 != NULL) {
2854 kmem_free(inq89, inq89_len);
2855 }
2856
2857 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
2858 "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
2859 return (DDI_SUCCESS);
2860 }
2861 sid = (void *)(&inq89[60]);
2862
2863 swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
2864 swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
2865
2866 model[SATA_ID_MODEL_LEN] = 0;
2867 fw[SATA_ID_FW_LEN] = 0;
2868
2869 /*
2870 * split model into into vid/pid
2871 */
2872 for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
2873 if ((*pid == ' ') || (*pid == '\t'))
2874 break;
2875 if (i < SATA_ID_MODEL_LEN) {
2876 vid = model;
2877 /*
2878 * terminate vid, establish pid
2879 */
2880 *pid++ = 0;
2881 } else {
2882 /*
2883 * vid will stay "ATA ", the rule is same
2884 * as sata framework implementation.
2885 */
2886 vid = NULL;
2887 /*
2888 * model is all pid
2889 */
2890 pid = model;
2891 }
2892
2893 /*
2894 * override SCSA "inquiry-*" properties
2895 */
2896 if (vid)
2897 (void) scsi_device_prop_update_inqstring(sd,
2898 INQUIRY_VENDOR_ID, vid, strlen(vid));
2899 if (pid)
2900 (void) scsi_device_prop_update_inqstring(sd,
2901 INQUIRY_PRODUCT_ID, pid, strlen(pid));
2902 (void) scsi_device_prop_update_inqstring(sd,
2903 INQUIRY_REVISION_ID, fw, strlen(fw));
2904
2905 if (inq89 != NULL) {
2906 kmem_free(inq89, inq89_len);
2907 }
2908 } else {
2909 mutex_exit(&mpt->m_mutex);
2910 }
2911
2912 return (DDI_SUCCESS);
2913 }
2914 /*
2915 * tran_tgt_free(9E) - target device instance deallocation
2916 */
2917 static void
2918 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2919 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2920 {
2921 #ifndef __lock_lint
2922 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
2923 #endif
2924
2925 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
2926
2927 if (tgt_private != NULL) {
2928 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
2929 hba_tran->tran_tgt_private = NULL;
2930 }
2931 }
2932
2933 /*
2934 * scsi_pkt handling
2935 *
2936 * Visible to the external world via the transport structure.
2937 */
2938
2939 /*
2940 * Notes:
2941 * - transport the command to the addressed SCSI target/lun device
2942 * - normal operation is to schedule the command to be transported,
2943 * and return TRAN_ACCEPT if this is successful.
2944 * - if NO_INTR, tran_start must poll device for command completion
2945 */
2946 static int
2947 mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
2948 {
2949 #ifndef __lock_lint
2950 _NOTE(ARGUNUSED(ap))
2951 #endif
2952 mptsas_t *mpt = PKT2MPT(pkt);
2953 mptsas_cmd_t *cmd = PKT2CMD(pkt);
2954 int rval;
2955 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
2956
2957 NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
2958 ASSERT(ptgt);
2959 if (ptgt == NULL)
2960 return (TRAN_FATAL_ERROR);
2961
2962 /*
2963 * prepare the pkt before taking mutex.
2964 */
2965 rval = mptsas_prepare_pkt(cmd);
2966 if (rval != TRAN_ACCEPT) {
2967 return (rval);
2968 }
2969
2970 /*
2971 * Send the command to target/lun, however your HBA requires it.
2972 * If busy, return TRAN_BUSY; if there's some other formatting error
2973 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
2974 * return of TRAN_ACCEPT.
2975 *
2976 * Remember that access to shared resources, including the mptsas_t
2977 * data structure and the HBA hardware registers, must be protected
2978 * with mutexes, here and everywhere.
2979 *
2980 * Also remember that at interrupt time, you'll get an argument
2981 * to the interrupt handler which is a pointer to your mptsas_t
2982 * structure; you'll have to remember which commands are outstanding
2983 * and which scsi_pkt is the currently-running command so the
2984 * interrupt handler can refer to the pkt to set completion
2985 * status, call the target driver back through pkt_comp, etc.
2986 *
2987 * If the instance lock is held by other thread, don't spin to wait
2988 * for it. Instead, queue the cmd and next time when the instance lock
2989 * is not held, accept all the queued cmd. A extra tx_waitq is
2990 * introduced to protect the queue.
2991 *
2992 * The polled cmd will not be queud and accepted as usual.
2993 *
2994 * Under the tx_waitq mutex, record whether a thread is draining
2995 * the tx_waitq. An IO requesting thread that finds the instance
2996 * mutex contended appends to the tx_waitq and while holding the
2997 * tx_wait mutex, if the draining flag is not set, sets it and then
2998 * proceeds to spin for the instance mutex. This scheme ensures that
2999 * the last cmd in a burst be processed.
3000 *
3001 * we enable this feature only when the helper threads are enabled,
3002 * at which we think the loads are heavy.
3003 *
3004 * per instance mutex m_tx_waitq_mutex is introduced to protect the
3005 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
3006 */
3007
3008 if (mpt->m_doneq_thread_n) {
3009 if (mutex_tryenter(&mpt->m_mutex) != 0) {
3010 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3011 mutex_exit(&mpt->m_mutex);
3012 } else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3013 mutex_enter(&mpt->m_mutex);
3014 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3015 mutex_exit(&mpt->m_mutex);
3016 } else {
3017 mutex_enter(&mpt->m_tx_waitq_mutex);
3018 /*
3019 * ptgt->m_dr_flag is protected by m_mutex or
3020 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
3021 * is acquired.
3022 */
3023 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3024 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3025 /*
3026 * The command should be allowed to
3027 * retry by returning TRAN_BUSY to
3028 * to stall the I/O's which come from
3029 * scsi_vhci since the device/path is
3030 * in unstable state now.
3031 */
3032 mutex_exit(&mpt->m_tx_waitq_mutex);
3033 return (TRAN_BUSY);
3034 } else {
3035 /*
3036 * The device is offline, just fail the
3037 * command by returning
3038 * TRAN_FATAL_ERROR.
3039 */
3040 mutex_exit(&mpt->m_tx_waitq_mutex);
3041 return (TRAN_FATAL_ERROR);
3042 }
3043 }
3044 if (mpt->m_tx_draining) {
3045 cmd->cmd_flags |= CFLAG_TXQ;
3046 *mpt->m_tx_waitqtail = cmd;
3047 mpt->m_tx_waitqtail = &cmd->cmd_linkp;
3048 mutex_exit(&mpt->m_tx_waitq_mutex);
3049 } else { /* drain the queue */
3050 mpt->m_tx_draining = 1;
3051 mutex_exit(&mpt->m_tx_waitq_mutex);
3052 mutex_enter(&mpt->m_mutex);
3053 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3054 mutex_exit(&mpt->m_mutex);
3055 }
3056 }
3057 } else {
3058 mutex_enter(&mpt->m_mutex);
3059 /*
3060 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3061 * in this case, m_mutex is acquired.
3062 */
3063 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3064 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3065 /*
3066 * commands should be allowed to retry by
3067 * returning TRAN_BUSY to stall the I/O's
3068 * which come from scsi_vhci since the device/
3069 * path is in unstable state now.
3070 */
3071 mutex_exit(&mpt->m_mutex);
3072 return (TRAN_BUSY);
3073 } else {
3074 /*
3075 * The device is offline, just fail the
3076 * command by returning TRAN_FATAL_ERROR.
3077 */
3078 mutex_exit(&mpt->m_mutex);
3079 return (TRAN_FATAL_ERROR);
3080 }
3081 }
3082 rval = mptsas_accept_pkt(mpt, cmd);
3083 mutex_exit(&mpt->m_mutex);
3084 }
3085
3086 return (rval);
3087 }
3088
3089 /*
3090 * Accept all the queued cmds(if any) before accept the current one.
3091 */
3092 static int
3093 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3094 {
3095 int rval;
3096 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3097
3098 ASSERT(mutex_owned(&mpt->m_mutex));
3099 /*
3100 * The call to mptsas_accept_tx_waitq() must always be performed
3101 * because that is where mpt->m_tx_draining is cleared.
3102 */
3103 mutex_enter(&mpt->m_tx_waitq_mutex);
3104 mptsas_accept_tx_waitq(mpt);
3105 mutex_exit(&mpt->m_tx_waitq_mutex);
3106 /*
3107 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3108 * in this case, m_mutex is acquired.
3109 */
3110 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3111 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3112 /*
3113 * The command should be allowed to retry by returning
3114 * TRAN_BUSY to stall the I/O's which come from
3115 * scsi_vhci since the device/path is in unstable state
3116 * now.
3117 */
3118 return (TRAN_BUSY);
3119 } else {
3120 /*
3121 * The device is offline, just fail the command by
3122 * return TRAN_FATAL_ERROR.
3123 */
3124 return (TRAN_FATAL_ERROR);
3125 }
3126 }
3127 rval = mptsas_accept_pkt(mpt, cmd);
3128
3129 return (rval);
3130 }
3131
/*
 * Accept a single prepared command: either start it on the HBA right
 * away or place it on the wait queue.  Returns TRAN_ACCEPT, TRAN_BUSY
 * or TRAN_FATAL_ERROR.  Caller must hold mpt->m_mutex.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Re-initialize the pkt fields unless that was already done. */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * Reset the throttle if we were draining and the target has gone
	 * idle (no outstanding commands).
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached.  Check if being reset and if DevHandle is being
	 * re-initialized.  If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/* Tx-waitq commands complete through the doneq. */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If the device handle has already been invalidated, just fail the
	 * command.  In theory it should be impossible for a scsi_vhci client
	 * to send down a command with an invalid devhdl, since the devhdl is
	 * invalidated after path offline, and a target driver is not
	 * supposed to select an offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* No free request slot; queue until one opens up. */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3235
/*
 * Allocate a free request slot (SMID) for the command and account for
 * it in the per-HBA and per-target outstanding-command counts.
 * Returns TRUE if a slot was found, FALSE if all slots are in use.
 * Caller must hold mpt->m_mutex.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t	*slots;
	int		slot;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(mutex_owned(&mpt->m_mutex));
	slots = mpt->m_active;

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));

	/*
	 * m_tags is equivalent to the SMID when sending requests.  Since the
	 * SMID cannot be 0, start out at one if rolling over past the size
	 * of the request queue depth.  Also, don't use the last SMID, which
	 * is reserved for TM requests.
	 */
	slot = (slots->m_tags)++;
	if (slots->m_tags > slots->m_n_slots) {
		slots->m_tags = 1;
	}

alloc_tag:
	/* Validate tag, should never fail. */
	if (slots->m_slot[slot] == NULL) {
		/*
		 * Make sure SMID is not using reserved value of 0
		 * and the TM request slot.
		 */
		ASSERT((slot > 0) && (slot <= slots->m_n_slots));
		cmd->cmd_slot = slot;
		slots->m_slot[slot] = cmd;
		mpt->m_ncmds++;

		/*
		 * Only increment per-target ncmds if this is not a command
		 * that has no target associated with it (i.e. an event
		 * acknowledgment).
		 */
		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
			ptgt->m_t_ncmds++;
		}
		cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;

		/*
		 * If the initial timeout is less than or equal to one
		 * watchdog tick, bump the timeout by a tick so that the
		 * command doesn't time out before its allotted time.
		 */
		if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
			cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
		}
		return (TRUE);
	} else {
		int i;

		/*
		 * If slot in use, scan until a free one is found.  Don't use
		 * 0 or the final slot, which is reserved for TM requests.
		 */
		for (i = 0; i < slots->m_n_slots; i++) {
			slot = slots->m_tags;
			if (++(slots->m_tags) > slots->m_n_slots) {
				slots->m_tags = 1;
			}
			if (slots->m_slot[slot] == NULL) {
				NDBG22(("found free slot %d", slot));
				goto alloc_tag;
			}
		}
	}
	return (FALSE);
}
3313
3314 /*
3315 * prepare the pkt:
3316 * the pkt may have been resubmitted or just reused so
3317 * initialize some fields and do some checks.
3318 */
3319 static int
3320 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3321 {
3322 struct scsi_pkt *pkt = CMD2PKT(cmd);
3323
3324 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3325
3326 /*
3327 * Reinitialize some fields that need it; the packet may
3328 * have been resubmitted
3329 */
3330 pkt->pkt_reason = CMD_CMPLT;
3331 pkt->pkt_state = 0;
3332 pkt->pkt_statistics = 0;
3333 pkt->pkt_resid = 0;
3334 cmd->cmd_age = 0;
3335 cmd->cmd_pkt_flags = pkt->pkt_flags;
3336
3337 /*
3338 * zero status byte.
3339 */
3340 *(pkt->pkt_scbp) = 0;
3341
3342 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3343 pkt->pkt_resid = cmd->cmd_dmacount;
3344
3345 /*
3346 * consistent packets need to be sync'ed first
3347 * (only for data going out)
3348 */
3349 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3350 (cmd->cmd_flags & CFLAG_DMASEND)) {
3351 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3352 DDI_DMA_SYNC_FORDEV);
3353 }
3354 }
3355
3356 cmd->cmd_flags =
3357 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3358 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3359
3360 return (TRAN_ACCEPT);
3361 }
3362
3363 /*
3364 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3365 *
3366 * One of three possibilities:
3367 * - allocate scsi_pkt
3368 * - allocate scsi_pkt and DMA resources
3369 * - allocate DMA resources to an already-allocated pkt
3370 */
3371 static struct scsi_pkt *
3372 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3373 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3374 int (*callback)(), caddr_t arg)
3375 {
3376 mptsas_cmd_t *cmd, *new_cmd;
3377 mptsas_t *mpt = ADDR2MPT(ap);
3378 int failure = 1;
3379 uint_t oldcookiec;
3380 mptsas_target_t *ptgt = NULL;
3381 int rval;
3382 mptsas_tgt_private_t *tgt_private;
3383 int kf;
3384
3385 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3386
3387 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3388 tran_tgt_private;
3389 ASSERT(tgt_private != NULL);
3390 if (tgt_private == NULL) {
3391 return (NULL);
3392 }
3393 ptgt = tgt_private->t_private;
3394 ASSERT(ptgt != NULL);
3395 if (ptgt == NULL)
3396 return (NULL);
3397 ap->a_target = ptgt->m_devhdl;
3398 ap->a_lun = tgt_private->t_lun;
3399
3400 ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3401 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3402 statuslen *= 100; tgtlen *= 4;
3403 #endif
3404 NDBG3(("mptsas_scsi_init_pkt:\n"
3405 "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3406 ap->a_target, (void *)pkt, (void *)bp,
3407 cmdlen, statuslen, tgtlen, flags));
3408
3409 /*
3410 * Allocate the new packet.
3411 */
3412 if (pkt == NULL) {
3413 ddi_dma_handle_t save_dma_handle;
3414 ddi_dma_handle_t save_arq_dma_handle;
3415 struct buf *save_arq_bp;
3416 ddi_dma_cookie_t save_arqcookie;
3417
3418 cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3419
3420 if (cmd) {
3421 save_dma_handle = cmd->cmd_dmahandle;
3422 save_arq_dma_handle = cmd->cmd_arqhandle;
3423 save_arq_bp = cmd->cmd_arq_buf;
3424 save_arqcookie = cmd->cmd_arqcookie;
3425 bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3426 cmd->cmd_dmahandle = save_dma_handle;
3427 cmd->cmd_arqhandle = save_arq_dma_handle;
3428 cmd->cmd_arq_buf = save_arq_bp;
3429 cmd->cmd_arqcookie = save_arqcookie;
3430
3431 pkt = (void *)((uchar_t *)cmd +
3432 sizeof (struct mptsas_cmd));
3433 pkt->pkt_ha_private = (opaque_t)cmd;
3434 pkt->pkt_address = *ap;
3435 pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3436 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3437 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3438 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3439 cmd->cmd_cdblen = (uchar_t)cmdlen;
3440 cmd->cmd_scblen = statuslen;
3441 cmd->cmd_rqslen = SENSE_LENGTH;
3442 cmd->cmd_tgt_addr = ptgt;
3443 failure = 0;
3444 }
3445
3446 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3447 (tgtlen > PKT_PRIV_LEN) ||
3448 (statuslen > EXTCMDS_STATUS_SIZE)) {
3449 if (failure == 0) {
3450 /*
3451 * if extern alloc fails, all will be
3452 * deallocated, including cmd
3453 */
3454 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3455 cmdlen, tgtlen, statuslen, kf);
3456 }
3457 if (failure) {
3458 /*
3459 * if extern allocation fails, it will
3460 * deallocate the new pkt as well
3461 */
3462 return (NULL);
3463 }
3464 }
3465 new_cmd = cmd;
3466
3467 } else {
3468 cmd = PKT2CMD(pkt);
3469 new_cmd = NULL;
3470 }
3471
3472
3473 /* grab cmd->cmd_cookiec here as oldcookiec */
3474
3475 oldcookiec = cmd->cmd_cookiec;
3476
3477 /*
3478 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3479 * greater than 0 and we'll need to grab the next dma window
3480 */
3481 /*
3482 * SLM-not doing extra command frame right now; may add later
3483 */
3484
3485 if (cmd->cmd_nwin > 0) {
3486
3487 /*
3488 * Make sure we havn't gone past the the total number
3489 * of windows
3490 */
3491 if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3492 return (NULL);
3493 }
3494 if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3495 &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3496 &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3497 return (NULL);
3498 }
3499 goto get_dma_cookies;
3500 }
3501
3502
3503 if (flags & PKT_XARQ) {
3504 cmd->cmd_flags |= CFLAG_XARQ;
3505 }
3506
3507 /*
3508 * DMA resource allocation. This version assumes your
3509 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3510 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3511 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3512 */
3513 if (bp && (bp->b_bcount != 0) &&
3514 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3515
3516 int cnt, dma_flags;
3517 mptti_t *dmap; /* ptr to the S/G list */
3518
3519 /*
3520 * Set up DMA memory and position to the next DMA segment.
3521 */
3522 ASSERT(cmd->cmd_dmahandle != NULL);
3523
3524 if (bp->b_flags & B_READ) {
3525 dma_flags = DDI_DMA_READ;
3526 cmd->cmd_flags &= ~CFLAG_DMASEND;
3527 } else {
3528 dma_flags = DDI_DMA_WRITE;
3529 cmd->cmd_flags |= CFLAG_DMASEND;
3530 }
3531 if (flags & PKT_CONSISTENT) {
3532 cmd->cmd_flags |= CFLAG_CMDIOPB;
3533 dma_flags |= DDI_DMA_CONSISTENT;
3534 }
3535
3536 if (flags & PKT_DMA_PARTIAL) {
3537 dma_flags |= DDI_DMA_PARTIAL;
3538 }
3539
3540 /*
3541 * workaround for byte hole issue on psycho and
3542 * schizo pre 2.1
3543 */
3544 if ((bp->b_flags & B_READ) && ((bp->b_flags &
3545 (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3546 ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3547 dma_flags |= DDI_DMA_CONSISTENT;
3548 }
3549
3550 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3551 dma_flags, callback, arg,
3552 &cmd->cmd_cookie, &cmd->cmd_cookiec);
3553 if (rval == DDI_DMA_PARTIAL_MAP) {
3554 (void) ddi_dma_numwin(cmd->cmd_dmahandle,
3555 &cmd->cmd_nwin);
3556 cmd->cmd_winindex = 0;
3557 (void) ddi_dma_getwin(cmd->cmd_dmahandle,
3558 cmd->cmd_winindex, &cmd->cmd_dma_offset,
3559 &cmd->cmd_dma_len, &cmd->cmd_cookie,
3560 &cmd->cmd_cookiec);
3561 } else if (rval && (rval != DDI_DMA_MAPPED)) {
3562 switch (rval) {
3563 case DDI_DMA_NORESOURCES:
3564 bioerror(bp, 0);
3565 break;
3566 case DDI_DMA_BADATTR:
3567 case DDI_DMA_NOMAPPING:
3568 bioerror(bp, EFAULT);
3569 break;
3570 case DDI_DMA_TOOBIG:
3571 default:
3572 bioerror(bp, EINVAL);
3573 break;
3574 }
3575 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3576 if (new_cmd) {
3577 mptsas_scsi_destroy_pkt(ap, pkt);
3578 }
3579 return ((struct scsi_pkt *)NULL);
3580 }
3581
3582 get_dma_cookies:
3583 cmd->cmd_flags |= CFLAG_DMAVALID;
3584 ASSERT(cmd->cmd_cookiec > 0);
3585
3586 if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3587 mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3588 cmd->cmd_cookiec);
3589 bioerror(bp, EINVAL);
3590 if (new_cmd) {
3591 mptsas_scsi_destroy_pkt(ap, pkt);
3592 }
3593 return ((struct scsi_pkt *)NULL);
3594 }
3595
3596 /*
3597 * Allocate extra SGL buffer if needed.
3598 */
3599 if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3600 (cmd->cmd_extra_frames == NULL)) {
3601 if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3602 DDI_FAILURE) {
3603 mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3604 "failed");
3605 bioerror(bp, ENOMEM);
3606 if (new_cmd) {
3607 mptsas_scsi_destroy_pkt(ap, pkt);
3608 }
3609 return ((struct scsi_pkt *)NULL);
3610 }
3611 }
3612
3613 /*
3614 * Always use scatter-gather transfer
3615 * Use the loop below to store physical addresses of
3616 * DMA segments, from the DMA cookies, into your HBA's
3617 * scatter-gather list.
3618 * We need to ensure we have enough kmem alloc'd
3619 * for the sg entries since we are no longer using an
3620 * array inside mptsas_cmd_t.
3621 *
3622 * We check cmd->cmd_cookiec against oldcookiec so
3623 * the scatter-gather list is correctly allocated
3624 */
3625
3626 if (oldcookiec != cmd->cmd_cookiec) {
3627 if (cmd->cmd_sg != (mptti_t *)NULL) {
3628 kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3629 oldcookiec);
3630 cmd->cmd_sg = NULL;
3631 }
3632 }
3633
3634 if (cmd->cmd_sg == (mptti_t *)NULL) {
3635 cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3636 cmd->cmd_cookiec), kf);
3637
3638 if (cmd->cmd_sg == (mptti_t *)NULL) {
3639 mptsas_log(mpt, CE_WARN,
3640 "unable to kmem_alloc enough memory "
3641 "for scatter/gather list");
3642 /*
3643 * if we have an ENOMEM condition we need to behave
3644 * the same way as the rest of this routine
3645 */
3646
3647 bioerror(bp, ENOMEM);
3648 if (new_cmd) {
3649 mptsas_scsi_destroy_pkt(ap, pkt);
3650 }
3651 return ((struct scsi_pkt *)NULL);
3652 }
3653 }
3654
3655 dmap = cmd->cmd_sg;
3656
3657 ASSERT(cmd->cmd_cookie.dmac_size != 0);
3658
3659 /*
3660 * store the first segment into the S/G list
3661 */
3662 dmap->count = cmd->cmd_cookie.dmac_size;
3663 dmap->addr.address64.Low = (uint32_t)
3664 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3665 dmap->addr.address64.High = (uint32_t)
3666 (cmd->cmd_cookie.dmac_laddress >> 32);
3667
3668 /*
3669 * dmacount counts the size of the dma for this window
3670 * (if partial dma is being used). totaldmacount
3671 * keeps track of the total amount of dma we have
3672 * transferred for all the windows (needed to calculate
3673 * the resid value below).
3674 */
3675 cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
3676 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3677
3678 /*
3679 * We already stored the first DMA scatter gather segment,
3680 * start at 1 if we need to store more.
3681 */
3682 for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
3683 /*
3684 * Get next DMA cookie
3685 */
3686 ddi_dma_nextcookie(cmd->cmd_dmahandle,
3687 &cmd->cmd_cookie);
3688 dmap++;
3689
3690 cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
3691 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3692
3693 /*
3694 * store the segment parms into the S/G list
3695 */
3696 dmap->count = cmd->cmd_cookie.dmac_size;
3697 dmap->addr.address64.Low = (uint32_t)
3698 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3699 dmap->addr.address64.High = (uint32_t)
3700 (cmd->cmd_cookie.dmac_laddress >> 32);
3701 }
3702
3703 /*
3704 * If this was partially allocated we set the resid
3705 * the amount of data NOT transferred in this window
3706 * If there is only one window, the resid will be 0
3707 */
3708 pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
3709 NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
3710 }
3711 return (pkt);
3712 }
3713
3714 /*
3715 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3716 *
3717 * Notes:
3718 * - also frees DMA resources if allocated
3719 * - implicit DMA synchonization
3720 */
3721 static void
3722 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3723 {
3724 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3725 mptsas_t *mpt = ADDR2MPT(ap);
3726
3727 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3728 ap->a_target, (void *)pkt));
3729
3730 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3731 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3732 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3733 }
3734
3735 if (cmd->cmd_sg) {
3736 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3737 cmd->cmd_sg = NULL;
3738 }
3739
3740 mptsas_free_extra_sgl_frame(mpt, cmd);
3741
3742 if ((cmd->cmd_flags &
3743 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3744 CFLAG_SCBEXTERN)) == 0) {
3745 cmd->cmd_flags = CFLAG_FREE;
3746 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3747 } else {
3748 mptsas_pkt_destroy_extern(mpt, cmd);
3749 }
3750 }
3751
3752 /*
3753 * kmem cache constructor and destructor:
3754 * When constructing, we bzero the cmd and allocate the dma handle
3755 * When destructing, just free the dma handle
3756 */
3757 static int
3758 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3759 {
3760 mptsas_cmd_t *cmd = buf;
3761 mptsas_t *mpt = cdrarg;
3762 struct scsi_address ap;
3763 uint_t cookiec;
3764 ddi_dma_attr_t arq_dma_attr;
3765 int (*callback)(caddr_t);
3766
3767 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3768
3769 NDBG4(("mptsas_kmem_cache_constructor"));
3770
3771 ap.a_hba_tran = mpt->m_tran;
3772 ap.a_target = 0;
3773 ap.a_lun = 0;
3774
3775 /*
3776 * allocate a dma handle
3777 */
3778 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
3779 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
3780 cmd->cmd_dmahandle = NULL;
3781 return (-1);
3782 }
3783
3784 cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
3785 SENSE_LENGTH, B_READ, callback, NULL);
3786 if (cmd->cmd_arq_buf == NULL) {
3787 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3788 cmd->cmd_dmahandle = NULL;
3789 return (-1);
3790 }
3791
3792 /*
3793 * allocate a arq handle
3794 */
3795 arq_dma_attr = mpt->m_msg_dma_attr;
3796 arq_dma_attr.dma_attr_sgllen = 1;
3797 if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3798 NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3799 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3800 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3801 cmd->cmd_dmahandle = NULL;
3802 cmd->cmd_arqhandle = NULL;
3803 return (-1);
3804 }
3805
3806 if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
3807 cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3808 callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3809 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3810 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3811 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3812 cmd->cmd_dmahandle = NULL;
3813 cmd->cmd_arqhandle = NULL;
3814 cmd->cmd_arq_buf = NULL;
3815 return (-1);
3816 }
3817
3818 return (0);
3819 }
3820
3821 static void
3822 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3823 {
3824 #ifndef __lock_lint
3825 _NOTE(ARGUNUSED(cdrarg))
3826 #endif
3827 mptsas_cmd_t *cmd = buf;
3828
3829 NDBG4(("mptsas_kmem_cache_destructor"));
3830
3831 if (cmd->cmd_arqhandle) {
3832 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3833 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3834 cmd->cmd_arqhandle = NULL;
3835 }
3836 if (cmd->cmd_arq_buf) {
3837 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3838 cmd->cmd_arq_buf = NULL;
3839 }
3840 if (cmd->cmd_dmahandle) {
3841 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3842 cmd->cmd_dmahandle = NULL;
3843 }
3844 }
3845
/*
 * kmem cache constructor for extra SGL frame buffers: allocate and bind
 * a contiguous, 16-byte-aligned DMA area large enough to hold the
 * chained SGL frames for one command.  Returns DDI_SUCCESS or
 * DDI_FAILURE (with anything allocated here unwound).
 */
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t	*p = buf;
	mptsas_t		*mpt = cdrarg;
	ddi_dma_attr_t		frame_dma_attr;
	size_t			mem_size, alloc_len;
	ddi_dma_cookie_t	cookie;
	uint_t			ncookie;
	int			(*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/* Frames must be 16-byte aligned and physically contiguous. */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	/*
	 * The main frame is allocated elsewhere, so we need room for at
	 * most (m_max_request_frames - 1) additional frames.
	 */
	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address.  This chip uses this
	 * address to dma to and from the driver.  The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_address;

	return (DDI_SUCCESS);
}
3901
3902 static void
3903 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
3904 {
3905 #ifndef __lock_lint
3906 _NOTE(ARGUNUSED(cdrarg))
3907 #endif
3908 mptsas_cache_frames_t *p = buf;
3909 if (p->m_dma_hdl != NULL) {
3910 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
3911 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3912 ddi_dma_free_handle(&p->m_dma_hdl);
3913 p->m_phys_addr = NULL;
3914 p->m_frames_addr = NULL;
3915 p->m_dma_hdl = NULL;
3916 p->m_acc_hdl = NULL;
3917 }
3918
3919 }
3920
3921 /*
3922 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
3923 * for non-standard length cdb, pkt_private, status areas
3924 * if allocation fails, then deallocate all external space and the pkt
3925 */
3926 /* ARGSUSED */
3927 static int
3928 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
3929 int cmdlen, int tgtlen, int statuslen, int kf)
3930 {
3931 caddr_t cdbp, scbp, tgt;
3932 int (*callback)(caddr_t) = (kf == KM_SLEEP) ?
3933 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
3934 struct scsi_address ap;
3935 size_t senselength;
3936 ddi_dma_attr_t ext_arq_dma_attr;
3937 uint_t cookiec;
3938
3939 NDBG3(("mptsas_pkt_alloc_extern: "
3940 "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
3941 (void *)cmd, cmdlen, tgtlen, statuslen, kf));
3942
3943 tgt = cdbp = scbp = NULL;
3944 cmd->cmd_scblen = statuslen;
3945 cmd->cmd_privlen = (uchar_t)tgtlen;
3946
3947 if (cmdlen > sizeof (cmd->cmd_cdb)) {
3948 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
3949 goto fail;
3950 }
3951 cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
3952 cmd->cmd_flags |= CFLAG_CDBEXTERN;
3953 }
3954 if (tgtlen > PKT_PRIV_LEN) {
3955 if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
3956 goto fail;
3957 }
3958 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
3959 cmd->cmd_pkt->pkt_private = tgt;
3960 }
3961 if (statuslen > EXTCMDS_STATUS_SIZE) {
3962 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
3963 goto fail;
3964 }
3965 cmd->cmd_flags |= CFLAG_SCBEXTERN;
3966 cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
3967
3968 /* allocate sense data buf for DMA */
3969
3970 senselength = statuslen - MPTSAS_GET_ITEM_OFF(
3971 struct scsi_arq_status, sts_sensedata);
3972 cmd->cmd_rqslen = (uchar_t)senselength;
3973
3974 ap.a_hba_tran = mpt->m_tran;
3975 ap.a_target = 0;
3976 ap.a_lun = 0;
3977
3978 cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
3979 (struct buf *)NULL, senselength, B_READ,
3980 callback, NULL);
3981
3982 if (cmd->cmd_ext_arq_buf == NULL) {
3983 goto fail;
3984 }
3985 /*
3986 * allocate a extern arq handle and bind the buf
3987 */
3988 ext_arq_dma_attr = mpt->m_msg_dma_attr;
3989 ext_arq_dma_attr.dma_attr_sgllen = 1;
3990 if ((ddi_dma_alloc_handle(mpt->m_dip,
3991 &ext_arq_dma_attr, callback,
3992 NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
3993 goto fail;
3994 }
3995
3996 if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
3997 cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3998 callback, NULL, &cmd->cmd_ext_arqcookie,
3999 &cookiec)
4000 != DDI_SUCCESS) {
4001 goto fail;
4002 }
4003 cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
4004 }
4005 return (0);
4006 fail:
4007 mptsas_pkt_destroy_extern(mpt, cmd);
4008 return (1);
4009 }
4010
4011 /*
4012 * deallocate external pkt space and deallocate the pkt
4013 */
4014 static void
4015 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4016 {
4017 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4018
4019 if (cmd->cmd_flags & CFLAG_FREE) {
4020 mptsas_log(mpt, CE_PANIC,
4021 "mptsas_pkt_destroy_extern: freeing free packet");
4022 _NOTE(NOT_REACHED)
4023 /* NOTREACHED */
4024 }
4025 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4026 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4027 }
4028 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4029 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4030 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4031 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4032 }
4033 if (cmd->cmd_ext_arqhandle) {
4034 ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
4035 cmd->cmd_ext_arqhandle = NULL;
4036 }
4037 if (cmd->cmd_ext_arq_buf)
4038 scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
4039 }
4040 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4041 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4042 }
4043 cmd->cmd_flags = CFLAG_FREE;
4044 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4045 }
4046
4047 /*
4048 * tran_sync_pkt(9E) - explicit DMA synchronization
4049 */
4050 /*ARGSUSED*/
4051 static void
4052 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4053 {
4054 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4055
4056 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4057 ap->a_target, (void *)pkt));
4058
4059 if (cmd->cmd_dmahandle) {
4060 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4061 (cmd->cmd_flags & CFLAG_DMASEND) ?
4062 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4063 }
4064 }
4065
4066 /*
4067 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4068 */
4069 /*ARGSUSED*/
4070 static void
4071 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4072 {
4073 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4074 mptsas_t *mpt = ADDR2MPT(ap);
4075
4076 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4077 ap->a_target, (void *)pkt));
4078
4079 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4080 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4081 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4082 }
4083
4084 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4085 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4086 cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4087 }
4088
4089 mptsas_free_extra_sgl_frame(mpt, cmd);
4090 }
4091
4092 static void
4093 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4094 {
4095 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4096 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4097 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4098 DDI_DMA_SYNC_FORCPU);
4099 }
4100 (*pkt->pkt_comp)(pkt);
4101 }
4102
4103 static void
4104 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4105 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4106 {
4107 uint_t cookiec;
4108 mptti_t *dmap;
4109 uint32_t flags;
4110 pMpi2SGESimple64_t sge;
4111 pMpi2SGEChain64_t sgechain;
4112 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4113
4114 /*
4115 * Save the number of entries in the DMA
4116 * Scatter/Gather list
4117 */
4118 cookiec = cmd->cmd_cookiec;
4119
4120 NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));
4121
4122 /*
4123 * Set read/write bit in control.
4124 */
4125 if (cmd->cmd_flags & CFLAG_DMASEND) {
4126 *control |= MPI2_SCSIIO_CONTROL_WRITE;
4127 } else {
4128 *control |= MPI2_SCSIIO_CONTROL_READ;
4129 }
4130
4131 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4132
4133 /*
4134 * We have 2 cases here. First where we can fit all the
4135 * SG elements into the main frame, and the case
4136 * where we can't.
4137 * If we have more cookies than we can attach to a frame
4138 * we will need to use a chain element to point
4139 * a location of memory where the rest of the S/G
4140 * elements reside.
4141 */
4142 if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4143 dmap = cmd->cmd_sg;
4144 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4145 while (cookiec--) {
4146 ddi_put32(acc_hdl,
4147 &sge->Address.Low, dmap->addr.address64.Low);
4148 ddi_put32(acc_hdl,
4149 &sge->Address.High, dmap->addr.address64.High);
4150 ddi_put32(acc_hdl, &sge->FlagsLength,
4151 dmap->count);
4152 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4153 flags |= ((uint32_t)
4154 (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4155 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4156 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4157 MPI2_SGE_FLAGS_SHIFT);
4158
4159 /*
4160 * If this is the last cookie, we set the flags
4161 * to indicate so
4162 */
4163 if (cookiec == 0) {
4164 flags |=
4165 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4166 | MPI2_SGE_FLAGS_END_OF_BUFFER
4167 | MPI2_SGE_FLAGS_END_OF_LIST) <<
4168 MPI2_SGE_FLAGS_SHIFT);
4169 }
4170 if (cmd->cmd_flags & CFLAG_DMASEND) {
4171 flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4172 MPI2_SGE_FLAGS_SHIFT);
4173 } else {
4174 flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4175 MPI2_SGE_FLAGS_SHIFT);
4176 }
4177 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4178 dmap++;
4179 sge++;
4180 }
4181 } else {
4182 /*
4183 * Hereby we start to deal with multiple frames.
4184 * The process is as follows:
4185 * 1. Determine how many frames are needed for SGL element
4186 * storage; Note that all frames are stored in contiguous
4187 * memory space and in 64-bit DMA mode each element is
4188 * 3 double-words (12 bytes) long.
4189 * 2. Fill up the main frame. We need to do this separately
4190 * since it contains the SCSI IO request header and needs
4191 * dedicated processing. Note that the last 4 double-words
4192 * of the SCSI IO header is for SGL element storage
4193 * (MPI2_SGE_IO_UNION).
4194 * 3. Fill the chain element in the main frame, so the DMA
4195 * engine can use the following frames.
4196 * 4. Enter a loop to fill the remaining frames. Note that the
4197 * last frame contains no chain element. The remaining
4198 * frames go into the mpt SGL buffer allocated on the fly,
4199 * not immediately following the main message frame, as in
4200 * Gen1.
4201 * Some restrictions:
4202 * 1. For 64-bit DMA, the simple element and chain element
4203 * are both of 3 double-words (12 bytes) in size, even
4204 * though all frames are stored in the first 4G of mem
4205 * range and the higher 32-bits of the address are always 0.
4206 * 2. On some controllers (like the 1064/1068), a frame can
4207 * hold SGL elements with the last 1 or 2 double-words
4208 * (4 or 8 bytes) un-used. On these controllers, we should
4209 * recognize that there's not enough room for another SGL
4210 * element and move the sge pointer to the next frame.
4211 */
4212 int i, j, k, l, frames, sgemax;
4213 int temp;
4214 uint8_t chainflags;
4215 uint16_t chainlength;
4216 mptsas_cache_frames_t *p;
4217
4218 /*
4219 * Sgemax is the number of SGE's that will fit
4220 * each extra frame and frames is total
4221 * number of frames we'll need. 1 sge entry per
	 * frame is reserved for the chain element, thus the -1 below.
4223 */
4224 sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
4225 - 1);
4226 temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4227
4228 /*
4229 * A little check to see if we need to round up the number
4230 * of frames we need
4231 */
4232 if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4233 sgemax) > 1) {
4234 frames = (temp + 1);
4235 } else {
4236 frames = temp;
4237 }
4238 dmap = cmd->cmd_sg;
4239 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4240
4241 /*
4242 * First fill in the main frame
4243 */
4244 for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
4245 ddi_put32(acc_hdl, &sge->Address.Low,
4246 dmap->addr.address64.Low);
4247 ddi_put32(acc_hdl, &sge->Address.High,
4248 dmap->addr.address64.High);
4249 ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
4250 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4251 flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4252 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4253 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4254 MPI2_SGE_FLAGS_SHIFT);
4255
4256 /*
4257 * If this is the last SGE of this frame
4258 * we set the end of list flag
4259 */
4260 if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
4261 flags |= ((uint32_t)
4262 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4263 MPI2_SGE_FLAGS_SHIFT);
4264 }
4265 if (cmd->cmd_flags & CFLAG_DMASEND) {
4266 flags |=
4267 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4268 MPI2_SGE_FLAGS_SHIFT);
4269 } else {
4270 flags |=
4271 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4272 MPI2_SGE_FLAGS_SHIFT);
4273 }
4274 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4275 dmap++;
4276 sge++;
4277 }
4278
4279 /*
4280 * Fill in the chain element in the main frame.
4281 * About calculation on ChainOffset:
4282 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4283 * in the end reserved for SGL element storage
4284 * (MPI2_SGE_IO_UNION); we should count it in our
4285 * calculation. See its definition in the header file.
4286 * 2. Constant j is the counter of the current SGL element
4287 * that will be processed, and (j - 1) is the number of
4288 * SGL elements that have been processed (stored in the
4289 * main frame).
4290 * 3. ChainOffset value should be in units of double-words (4
4291 * bytes) so the last value should be divided by 4.
4292 */
4293 ddi_put8(acc_hdl, &frame->ChainOffset,
4294 (sizeof (MPI2_SCSI_IO_REQUEST) -
4295 sizeof (MPI2_SGE_IO_UNION) +
4296 (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4297 sgechain = (pMpi2SGEChain64_t)sge;
4298 chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4299 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4300 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4301 ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4302
4303 /*
4304 * The size of the next frame is the accurate size of space
4305 * (in bytes) used to store the SGL elements. j is the counter
4306 * of SGL elements. (j - 1) is the number of SGL elements that
4307 * have been processed (stored in frames).
4308 */
4309 if (frames >= 2) {
4310 chainlength = mpt->m_req_frame_size /
4311 sizeof (MPI2_SGE_SIMPLE64) *
4312 sizeof (MPI2_SGE_SIMPLE64);
4313 } else {
4314 chainlength = ((cookiec - (j - 1)) *
4315 sizeof (MPI2_SGE_SIMPLE64));
4316 }
4317
4318 p = cmd->cmd_extra_frames;
4319
4320 ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4321 ddi_put32(acc_hdl, &sgechain->Address.Low,
4322 p->m_phys_addr);
4323 /* SGL is allocated in the first 4G mem range */
4324 ddi_put32(acc_hdl, &sgechain->Address.High, 0);
4325
4326 /*
4327 * If there are more than 2 frames left we have to
4328 * fill in the next chain offset to the location of
4329 * the chain element in the next frame.
4330 * sgemax is the number of simple elements in an extra
4331 * frame. Note that the value NextChainOffset should be
4332 * in double-words (4 bytes).
4333 */
4334 if (frames >= 2) {
4335 ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4336 (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4337 } else {
4338 ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4339 }
4340
4341 /*
4342 * Jump to next frame;
4343 * Starting here, chain buffers go into the per command SGL.
4344 * This buffer is allocated when chain buffers are needed.
4345 */
4346 sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4347 i = cookiec;
4348
4349 /*
4350 * Start filling in frames with SGE's. If we
4351 * reach the end of frame and still have SGE's
4352 * to fill we need to add a chain element and
4353 * use another frame. j will be our counter
4354 * for what cookie we are at and i will be
4355 * the total cookiec. k is the current frame
4356 */
4357 for (k = 1; k <= frames; k++) {
4358 for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4359
4360 /*
4361 * If we have reached the end of frame
4362 * and we have more SGE's to fill in
4363 * we have to fill the final entry
4364 * with a chain element and then
4365 * continue to the next frame
4366 */
4367 if ((l == (sgemax + 1)) && (k != frames)) {
4368 sgechain = (pMpi2SGEChain64_t)sge;
4369 j--;
4370 chainflags = (
4371 MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4372 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4373 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4374 ddi_put8(p->m_acc_hdl,
4375 &sgechain->Flags, chainflags);
4376 /*
4377 * k is the frame counter and (k + 1)
4378 * is the number of the next frame.
4379 * Note that frames are in contiguous
4380 * memory space.
4381 */
4382 ddi_put32(p->m_acc_hdl,
4383 &sgechain->Address.Low,
4384 (p->m_phys_addr +
4385 (mpt->m_req_frame_size * k)));
4386 ddi_put32(p->m_acc_hdl,
4387 &sgechain->Address.High, 0);
4388
4389 /*
4390 * If there are more than 2 frames left
4391 * we have to next chain offset to
4392 * the location of the chain element
4393 * in the next frame and fill in the
4394 * length of the next chain
4395 */
4396 if ((frames - k) >= 2) {
4397 ddi_put8(p->m_acc_hdl,
4398 &sgechain->NextChainOffset,
4399 (sgemax *
4400 sizeof (MPI2_SGE_SIMPLE64))
4401 >> 2);
4402 ddi_put16(p->m_acc_hdl,
4403 &sgechain->Length,
4404 mpt->m_req_frame_size /
4405 sizeof (MPI2_SGE_SIMPLE64) *
4406 sizeof (MPI2_SGE_SIMPLE64));
4407 } else {
4408 /*
4409 * This is the last frame. Set
4410 * the NextChainOffset to 0 and
4411 * Length is the total size of
4412 * all remaining simple elements
4413 */
4414 ddi_put8(p->m_acc_hdl,
4415 &sgechain->NextChainOffset,
4416 0);
4417 ddi_put16(p->m_acc_hdl,
4418 &sgechain->Length,
4419 (cookiec - j) *
4420 sizeof (MPI2_SGE_SIMPLE64));
4421 }
4422
4423 /* Jump to the next frame */
4424 sge = (pMpi2SGESimple64_t)
4425 ((char *)p->m_frames_addr +
4426 (int)mpt->m_req_frame_size * k);
4427
4428 continue;
4429 }
4430
4431 ddi_put32(p->m_acc_hdl,
4432 &sge->Address.Low,
4433 dmap->addr.address64.Low);
4434 ddi_put32(p->m_acc_hdl,
4435 &sge->Address.High,
4436 dmap->addr.address64.High);
4437 ddi_put32(p->m_acc_hdl,
4438 &sge->FlagsLength, dmap->count);
4439 flags = ddi_get32(p->m_acc_hdl,
4440 &sge->FlagsLength);
4441 flags |= ((uint32_t)(
4442 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4443 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4444 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4445 MPI2_SGE_FLAGS_SHIFT);
4446
4447 /*
4448 * If we are at the end of the frame and
4449 * there is another frame to fill in
4450 * we set the last simple element as last
4451 * element
4452 */
4453 if ((l == sgemax) && (k != frames)) {
4454 flags |= ((uint32_t)
4455 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4456 MPI2_SGE_FLAGS_SHIFT);
4457 }
4458
4459 /*
4460 * If this is the final cookie we
4461 * indicate it by setting the flags
4462 */
4463 if (j == i) {
4464 flags |= ((uint32_t)
4465 (MPI2_SGE_FLAGS_LAST_ELEMENT |
4466 MPI2_SGE_FLAGS_END_OF_BUFFER |
4467 MPI2_SGE_FLAGS_END_OF_LIST) <<
4468 MPI2_SGE_FLAGS_SHIFT);
4469 }
4470 if (cmd->cmd_flags & CFLAG_DMASEND) {
4471 flags |=
4472 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4473 MPI2_SGE_FLAGS_SHIFT);
4474 } else {
4475 flags |=
4476 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4477 MPI2_SGE_FLAGS_SHIFT);
4478 }
4479 ddi_put32(p->m_acc_hdl,
4480 &sge->FlagsLength, flags);
4481 dmap++;
4482 sge++;
4483 }
4484 }
4485
4486 /*
4487 * Sync DMA with the chain buffers that were just created
4488 */
4489 (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4490 }
4491 }
4492
4493 /*
4494 * Interrupt handling
4495 * Utility routine. Poll for status of a command sent to HBA
4496 * without interrupts (a FLAG_NOINTR command).
4497 */
4498 int
4499 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4500 {
4501 int rval = TRUE;
4502
4503 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4504
4505 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4506 mptsas_restart_hba(mpt);
4507 }
4508
4509 /*
4510 * Wait, using drv_usecwait(), long enough for the command to
4511 * reasonably return from the target if the target isn't
4512 * "dead". A polled command may well be sent from scsi_poll, and
4513 * there are retries built in to scsi_poll if the transport
4514 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
4515 * and retries the transport up to scsi_poll_busycnt times
4516 * (currently 60) if
4517 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4518 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4519 *
4520 * limit the waiting to avoid a hang in the event that the
4521 * cmd never gets started but we are still receiving interrupts
4522 */
4523 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4524 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4525 NDBG5(("mptsas_poll: command incomplete"));
4526 rval = FALSE;
4527 break;
4528 }
4529 }
4530
4531 if (rval == FALSE) {
4532
4533 /*
4534 * this isn't supposed to happen, the hba must be wedged
4535 * Mark this cmd as a timeout.
4536 */
4537 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4538 (STAT_TIMEOUT|STAT_ABORTED));
4539
4540 if (poll_cmd->cmd_queued == FALSE) {
4541
4542 NDBG5(("mptsas_poll: not on waitq"));
4543
4544 poll_cmd->cmd_pkt->pkt_state |=
4545 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4546 } else {
4547
4548 /* find and remove it from the waitq */
4549 NDBG5(("mptsas_poll: delete from waitq"));
4550 mptsas_waitq_delete(mpt, poll_cmd);
4551 }
4552
4553 }
4554 mptsas_fma_check(mpt, poll_cmd);
4555 NDBG5(("mptsas_poll: done"));
4556 return (rval);
4557 }
4558
4559 /*
4560 * Used for polling cmds and TM function
4561 */
/*
 * Poll the reply post queue with IOC interrupts masked.  Returns TRUE
 * if a reply was found and processed within roughly polltime
 * milliseconds, FALSE otherwise.
 */
static int
mptsas_wait_intr(mptsas_t *mpt, int polltime)
{
	int		cnt;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	uint32_t	int_mask;

	NDBG5(("mptsas_wait_intr"));

	/* Tell mptsas_intr() to leave replies to us while we poll. */
	mpt->m_polled_intr = 1;

	/*
	 * Get the current interrupt mask and disable interrupts. When
	 * re-enabling ints, set mask to saved value.
	 */
	int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Poll for up to polltime iterations; each empty pass waits
	 * 1000us (1ms) below, so this bounds the wait at roughly
	 * polltime milliseconds.
	 */
	for (cnt = 0; cnt < polltime; cnt++) {
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		/* An all-ones descriptor means the slot is still unused. */
		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			drv_usecwait(1000);
			continue;
		}

		/*
		 * The reply is valid, process it according to its
		 * type.
		 */
		mptsas_process_intr(mpt, reply_desc_union);

		/* Advance (and wrap) our local post queue index. */
		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}

		/*
		 * Update the global reply index
		 */
		ddi_put32(mpt->m_datap,
		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		mpt->m_polled_intr = 0;

		/*
		 * Re-enable interrupts and quit.
		 */
		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
		    int_mask);
		return (TRUE);

	}

	/*
	 * Clear polling flag, re-enable interrupts and quit.
	 */
	mpt->m_polled_intr = 0;
	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
	return (FALSE);
}
4631
4632 static void
4633 mptsas_handle_scsi_io_success(mptsas_t *mpt,
4634 pMpi2ReplyDescriptorsUnion_t reply_desc)
4635 {
4636 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
4637 uint16_t SMID;
4638 mptsas_slots_t *slots = mpt->m_active;
4639 mptsas_cmd_t *cmd = NULL;
4640 struct scsi_pkt *pkt;
4641
4642 ASSERT(mutex_owned(&mpt->m_mutex));
4643
4644 scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
4645 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
4646
4647 /*
4648 * This is a success reply so just complete the IO. First, do a sanity
4649 * check on the SMID. The final slot is used for TM requests, which
4650 * would not come into this reply handler.
4651 */
4652 if ((SMID == 0) || (SMID > slots->m_n_slots)) {
4653 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4654 SMID);
4655 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4656 return;
4657 }
4658
4659 cmd = slots->m_slot[SMID];
4660
4661 /*
4662 * print warning and return if the slot is empty
4663 */
4664 if (cmd == NULL) {
4665 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
4666 "in slot %d", SMID);
4667 return;
4668 }
4669
4670 pkt = CMD2PKT(cmd);
4671 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
4672 STATE_GOT_STATUS);
4673 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4674 pkt->pkt_state |= STATE_XFERRED_DATA;
4675 }
4676 pkt->pkt_resid = 0;
4677
4678 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
4679 cmd->cmd_flags |= CFLAG_FINISHED;
4680 cv_broadcast(&mpt->m_passthru_cv);
4681 return;
4682 } else {
4683 mptsas_remove_cmd(mpt, cmd);
4684 }
4685
4686 if (cmd->cmd_flags & CFLAG_RETRY) {
4687 /*
4688 * The target returned QFULL or busy, do not add tihs
4689 * pkt to the doneq since the hba will retry
4690 * this cmd.
4691 *
4692 * The pkt has already been resubmitted in
4693 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4694 * Remove this cmd_flag here.
4695 */
4696 cmd->cmd_flags &= ~CFLAG_RETRY;
4697 } else {
4698 mptsas_doneq_add(mpt, cmd);
4699 }
4700 }
4701
/*
 * Handle an address reply descriptor: validate and map the reply frame
 * it points at, dispatch on the IOC function that generated it, and
 * (for normal SCSI IO) finish or queue the owning command.  The reply
 * frame is returned to the free queue on the way out, except on paths
 * that keep ownership of it (passthru/config/diag waiters, events and
 * diag-buffer posts, which return early).
 */
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2AddressReplyDescriptor_t	address_reply;
	pMPI2DefaultReply_t		reply;
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint32_t			reply_addr;
	uint16_t			SMID, iocstatus;
	mptsas_slots_t			*slots = mpt->m_active;
	mptsas_cmd_t			*cmd = NULL;
	uint8_t				function, buffer_type;
	m_replyh_arg_t			*args;
	int				reply_frame_no;

	ASSERT(mutex_owned(&mpt->m_mutex));

	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
	    &address_reply->ReplyFrameAddress);
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);

	/*
	 * If reply frame is not in the proper range (within the reply
	 * frame region and aligned to a frame boundary) we should ignore
	 * this message and exit the interrupt handler.
	 */
	if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
	    (reply_addr >= (mpt->m_reply_frame_dma_addr +
	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
	    ((reply_addr - mpt->m_reply_frame_dma_addr) %
	    mpt->m_reply_frame_size != 0)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
		    "address 0x%x\n", reply_addr);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	/* Translate the DMA address into our kernel mapping of the frame. */
	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
	    mpt->m_reply_frame_dma_addr));
	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

	/*
	 * don't get slot information and command for events since these values
	 * don't exist
	 */
	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
		/*
		 * This could be a TM reply, which use the last allocated SMID,
		 * so allow for that.
		 */
		if ((SMID == 0) || (SMID > (slots->m_n_slots + 1))) {
			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
			    "%d\n", SMID);
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			return;
		}

		cmd = slots->m_slot[SMID];

		/*
		 * print warning and return if the slot is empty
		 */
		if (cmd == NULL) {
			mptsas_log(mpt, CE_WARN, "?NULL command for address "
			    "reply in slot %d", SMID);
			return;
		}
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			/*
			 * A waiting thread owns this command; hand it the
			 * reply frame address and wake all the waiter cvs
			 * (each waiter rechecks its own command's flags).
			 * The waiter is responsible for freeing the frame.
			 */
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
			return;
		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
			mptsas_remove_cmd(mpt, cmd);
		}
		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
	}
	/*
	 * Depending on the function, we need to handle
	 * the reply frame (and cmd) differently.
	 */
	switch (function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		cmd->cmd_rfm = reply_addr;
		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
		    cmd);
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_signal(&mpt->m_fw_cv);
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		/* Events are dispatched to a taskq; index by frame number. */
		reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
		    mpt->m_reply_frame_size;
		args = &mpt->m_replyh_args[reply_frame_no];
		args->mpt = (void *)mpt;
		args->rfm = reply_addr;

		/*
		 * Record the event if its type is enabled in
		 * this mpt instance by ioctl.
		 */
		mptsas_record_event(args);

		/*
		 * Handle time critical events
		 * NOT_RESPONDING/ADDED only now
		 */
		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
			/*
			 * Would not return main process,
			 * just let taskq resolve ack action
			 * and ack would be sent in taskq thread
			 */
			NDBG20(("send mptsas_handle_event_sync success"));
		}

		if (mpt->m_in_reset) {
			NDBG20(("dropping event received during reset"));
			return;
		}

		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "No memory available"
			    "for dispatch taskq");
			/*
			 * Return the reply frame to the free queue.
			 */
			ddi_put32(mpt->m_acc_free_queue_hdl,
			    &((uint32_t *)(void *)
			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
				mpt->m_free_index = 0;
			}

			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
		}
		return;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		/*
		 * If SMID is 0, this implies that the reply is due to a
		 * release function with a status that the buffer has been
		 * released.  Set the buffer flags accordingly.
		 */
		if (SMID == 0) {
			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &reply->IOCStatus);
			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
				pBuffer =
				    &mpt->m_fw_diag_buffer_list[buffer_type];
				pBuffer->valid_data = TRUE;
				pBuffer->owned_by_firmware = FALSE;
				pBuffer->immediate = FALSE;
			}
		} else {
			/*
			 * Normal handling of diag post reply with SMID.
			 */
			cmd = slots->m_slot[SMID];

			/*
			 * print warning and return if the slot is empty
			 */
			if (cmd == NULL) {
				mptsas_log(mpt, CE_WARN, "?NULL command for "
				    "address reply in slot %d", SMID);
				return;
			}
			/* Hand the frame to the FW diag waiter and wake it. */
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_fw_diag_cv);
		}
		return;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
		break;
	}

	/*
	 * Return the reply frame to the free queue.
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
	    reply_addr);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);

	/* FW commands are complete at this point; nothing more to do. */
	if (cmd->cmd_flags & CFLAG_FW_CMD)
		return;

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
4929
/*
 * Decode a SCSI IO reply frame: map the IOC/SCSI status into the
 * scsi_pkt reason/state/resid fields, capture auto-sense data for
 * check conditions, and trigger retries or dynamic reconfiguration
 * where the reported status calls for it.
 */
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
	uint8_t			scsi_status, scsi_state;
	uint16_t		ioc_status;
	uint32_t		xferred, sensecount, responsedata, loginfo = 0;
	struct scsi_pkt		*pkt;
	struct scsi_arq_status	*arqstat;
	struct buf		*bp;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint8_t			*sensedata = NULL;

	/* Pick the external ARQ buffer if one was set up for this cmd. */
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		bp = cmd->cmd_ext_arq_buf;
	} else {
		bp = cmd->cmd_arq_buf;
	}

	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
	    &reply->ResponseInfo);

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		mptsas_log(mpt, CE_NOTE,
		    "?Log info 0x%x received for target %d.\n"
		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
		    loginfo, Tgt(cmd), scsi_status, ioc_status,
		    scsi_state);
	}

	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
	    scsi_status, ioc_status, scsi_state));

	pkt = CMD2PKT(cmd);
	*(pkt->pkt_scbp) = scsi_status;

	if (loginfo == 0x31170000) {
		/*
		 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
		 * 0x31170000 comes, that means the device missing delay
		 * is in progress, so the command needs to be retried later.
		 */
		*(pkt->pkt_scbp) = STATUS_BUSY;
		return;
	}

	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
		/* Device is gone; start draining remaining I/O to it. */
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state |= STATE_GOT_BUS;
		if (ptgt->m_reset_delay == 0) {
			mptsas_set_throttle(mpt, ptgt,
			    DRAIN_THROTTLE);
		}
		return;
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		/* Only the low byte carries the SCSI response code. */
		responsedata &= 0x000000FF;
		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
			mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
			pkt->pkt_reason = CMD_TLR_OFF;
			return;
		}
	}


	switch (scsi_status) {
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
		arqstat = (void*)(pkt->pkt_scbp);
		arqstat->sts_rqpkt_status = *((struct scsi_status *)
		    (pkt->pkt_scbp));
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
		if (cmd->cmd_flags & CFLAG_XARQ) {
			pkt->pkt_state |= STATE_XARQ_DONE;
		}
		if (pkt->pkt_resid != cmd->cmd_dmacount) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
		arqstat->sts_rqpkt_state = pkt->pkt_state;
		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
		sensedata = (uint8_t *)&arqstat->sts_sensedata;

		/* Copy no more sense data than the ARQ buffer can hold. */
		bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
		    ((cmd->cmd_rqslen >= sensecount) ? sensecount :
		    cmd->cmd_rqslen));
		arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
		cmd->cmd_flags |= CFLAG_CMDARQ;
		/*
		 * Set proper status for pkt if autosense was valid
		 */
		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			struct scsi_status zero_status = { 0 };
			arqstat->sts_rqpkt_status = zero_status;
		}

		/*
		 * ASC=0x47 is parity error
		 * ASC=0x48 is initiator detected error received
		 */
		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
		    ((scsi_sense_asc(sensedata) == 0x47) ||
		    (scsi_sense_asc(sensedata) == 0x48))) {
			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
		}

		/*
		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
		 * ASC/ASCQ=0x25/0x00 means invalid lun
		 */
		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
		    (scsi_sense_asc(sensedata) == 0x3F) &&
		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
		    (scsi_sense_asc(sensedata) == 0x25) &&
		    (scsi_sense_ascq(sensedata) == 0x00))) {
			mptsas_topo_change_list_t *topo_node = NULL;

			topo_node = kmem_zalloc(
			    sizeof (mptsas_topo_change_list_t),
			    KM_NOSLEEP);
			if (topo_node == NULL) {
				mptsas_log(mpt, CE_NOTE, "No memory"
				    "resource for handle SAS dynamic"
				    "reconfigure.\n");
				break;
			}
			/* Queue a target-reconfigure event to the DR taskq. */
			topo_node->mpt = mpt;
			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
			topo_node->un.phymask = ptgt->m_phymask;
			topo_node->devhdl = ptgt->m_devhdl;
			topo_node->object = (void *)ptgt;
			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr,
			    (void *)topo_node,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
				    "for handle SAS dynamic reconfigure"
				    "failed. \n");
			}
		}
		break;
	case MPI2_SCSI_STATUS_GOOD:
		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state |= STATE_GOT_BUS;
			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			NDBG31(("lost disk for target%d, command:%x",
			    Tgt(cmd), pkt->pkt_cdbp[0]));
			break;
		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
			NDBG31(("data overrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_reason = CMD_DATA_OVR;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS
			    | STATE_XFERRED_DATA);
			pkt->pkt_resid = 0;
			break;
		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
			NDBG31(("data underrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS);
			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
			if (pkt->pkt_resid != cmd->cmd_dmacount) {
				pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			break;
		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_BUS_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_DEV_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_TERMINATED, STAT_TERMINATED);
			break;
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		case MPI2_IOCSTATUS_BUSY:
			/*
			 * set throttles to drain on every known target
			 */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}

			/*
			 * retry command at the head of the wait queue
			 */
			cmd->cmd_flags |= CFLAG_RETRY;
			cmd->cmd_pkt_flags |= FLAG_HEAD;

			(void) mptsas_accept_pkt(mpt, cmd);
			break;
		default:
			mptsas_log(mpt, CE_WARN,
			    "unknown ioc_status = %x\n", ioc_status);
			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
			    "count = %x, scsi_status = %x", scsi_state,
			    xferred, scsi_status);
			break;
		}
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		mptsas_handle_qfull(mpt, cmd);
		break;
	case MPI2_SCSI_STATUS_BUSY:
		NDBG31(("scsi_status busy received"));
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		NDBG31(("scsi_status reservation conflict received"));
		break;
	default:
		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
		    scsi_status, ioc_status);
		mptsas_log(mpt, CE_WARN,
		    "mptsas_process_intr: invalid scsi status\n");
		break;
	}
}
5181
5182 static void
5183 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5184 mptsas_cmd_t *cmd)
5185 {
5186 uint8_t task_type;
5187 uint16_t ioc_status;
5188 uint32_t log_info;
5189 uint16_t dev_handle;
5190 struct scsi_pkt *pkt = CMD2PKT(cmd);
5191
5192 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5193 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5194 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5195 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5196
5197 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5198 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5199 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5200 task_type, ioc_status, log_info, dev_handle);
5201 pkt->pkt_reason = CMD_INCOMPLETE;
5202 return;
5203 }
5204
5205 switch (task_type) {
5206 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5207 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5208 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5209 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5210 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5211 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5212 break;
5213 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5214 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5215 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5216 /*
5217 * Check for invalid DevHandle of 0 in case application
5218 * sends bad command. DevHandle of 0 could cause problems.
5219 */
5220 if (dev_handle == 0) {
5221 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5222 " DevHandle of 0.");
5223 } else {
5224 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5225 task_type);
5226 }
5227 break;
5228 default:
5229 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5230 task_type);
5231 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5232 break;
5233 }
5234 }
5235
/*
 * Helper-thread body: drains done queue `t' for this instance, invoking
 * each packet's completion with item->mutex dropped.  Runs until the
 * MPTSAS_DONEQ_THREAD_ACTIVE flag is cleared, then decrements the thread
 * count and wakes anyone waiting for the helpers to exit.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t			*mpt = arg->mpt;
	uint64_t			t = arg->t;
	mptsas_cmd_t			*cmd;
	struct scsi_pkt			*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		/*
		 * Sleep until work (or a shutdown wakeup) arrives.  A
		 * spurious wakeup is harmless: mptsas_doneq_thread_rm()
		 * returns NULL for an empty queue and we loop again.
		 */
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Drop the list lock across the completion callback so the
		 * target driver can re-enter the HBA without deadlocking.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/*
	 * Account for this thread's exit and notify the teardown path
	 * waiting on m_doneq_thread_cv for m_doneq_thread_n to drop.
	 */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5267
5268
5269 /*
5270 * mpt interrupt handler.
5271 */
5272 static uint_t
5273 mptsas_intr(caddr_t arg1, caddr_t arg2)
5274 {
5275 mptsas_t *mpt = (void *)arg1;
5276 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5277 uchar_t did_reply = FALSE;
5278
5279 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5280
5281 mutex_enter(&mpt->m_mutex);
5282
5283 /*
5284 * If interrupts are shared by two channels then check whether this
5285 * interrupt is genuinely for this channel by making sure first the
5286 * chip is in high power state.
5287 */
5288 if ((mpt->m_options & MPTSAS_OPT_PM) &&
5289 (mpt->m_power_level != PM_LEVEL_D0)) {
5290 mutex_exit(&mpt->m_mutex);
5291 return (DDI_INTR_UNCLAIMED);
5292 }
5293
5294 /*
5295 * If polling, interrupt was triggered by some shared interrupt because
5296 * IOC interrupts are disabled during polling, so polling routine will
5297 * handle any replies. Considering this, if polling is happening,
5298 * return with interrupt unclaimed.
5299 */
5300 if (mpt->m_polled_intr) {
5301 mutex_exit(&mpt->m_mutex);
5302 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5303 return (DDI_INTR_UNCLAIMED);
5304 }
5305
5306 /*
5307 * Read the istat register.
5308 */
5309 if ((INTPENDING(mpt)) != 0) {
5310 /*
5311 * read fifo until empty.
5312 */
5313 #ifndef __lock_lint
5314 _NOTE(CONSTCOND)
5315 #endif
5316 while (TRUE) {
5317 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5318 DDI_DMA_SYNC_FORCPU);
5319 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5320 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5321
5322 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5323 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5324 ddi_get32(mpt->m_acc_post_queue_hdl,
5325 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5326 break;
5327 }
5328
5329 /*
5330 * The reply is valid, process it according to its
5331 * type. Also, set a flag for updating the reply index
5332 * after they've all been processed.
5333 */
5334 did_reply = TRUE;
5335
5336 mptsas_process_intr(mpt, reply_desc_union);
5337
5338 /*
5339 * Increment post index and roll over if needed.
5340 */
5341 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5342 mpt->m_post_index = 0;
5343 }
5344 }
5345
5346 /*
5347 * Update the global reply index if at least one reply was
5348 * processed.
5349 */
5350 if (did_reply) {
5351 ddi_put32(mpt->m_datap,
5352 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5353 }
5354 } else {
5355 mutex_exit(&mpt->m_mutex);
5356 return (DDI_INTR_UNCLAIMED);
5357 }
5358 NDBG1(("mptsas_intr complete"));
5359
5360 /*
5361 * If no helper threads are created, process the doneq in ISR. If
5362 * helpers are created, use the doneq length as a metric to measure the
5363 * load on the interrupt CPU. If it is long enough, which indicates the
5364 * load is heavy, then we deliver the IO completions to the helpers.
5365 * This measurement has some limitations, although it is simple and
5366 * straightforward and works well for most of the cases at present.
5367 */
5368 if (!mpt->m_doneq_thread_n ||
5369 (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5370 mptsas_doneq_empty(mpt);
5371 } else {
5372 mptsas_deliver_doneq_thread(mpt);
5373 }
5374
5375 /*
5376 * If there are queued cmd, start them now.
5377 */
5378 if (mpt->m_waitq != NULL) {
5379 mptsas_restart_waitq(mpt);
5380 }
5381
5382 mutex_exit(&mpt->m_mutex);
5383 return (DDI_INTR_CLAIMED);
5384 }
5385
/*
 * Dispatch a single reply descriptor to the appropriate handler based on
 * its type, then mark the descriptor slot free (all ones) and flush it
 * back to device-visible memory for re-use by the IOC.
 */
static void
mptsas_process_intr(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc_union)
{
	uint8_t	reply_type;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * The reply is valid, process it according to its
	 * type.  Also, set a flag for updating the reply index
	 * after they've all been processed.
	 */
	reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
	    &reply_desc_union->Default.ReplyFlags);
	reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
		mptsas_handle_scsi_io_success(mpt, reply_desc_union);
	} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
		mptsas_handle_address_reply(mpt, reply_desc_union);
	} else {
		mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}

	/*
	 * Clear the reply descriptor for re-use and increment
	 * index.  The all-ones pattern is the "empty slot" sentinel
	 * that mptsas_intr() tests when draining the queue.
	 */
	ddi_put64(mpt->m_acc_post_queue_hdl,
	    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
	    0xFFFFFFFFFFFFFFFF);
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
}
5421
5422 /*
5423 * handle qfull condition
5424 */
5425 static void
5426 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5427 {
5428 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5429
5430 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5431 (ptgt->m_qfull_retries == 0)) {
5432 /*
5433 * We have exhausted the retries on QFULL, or,
5434 * the target driver has indicated that it
5435 * wants to handle QFULL itself by setting
5436 * qfull-retries capability to 0. In either case
5437 * we want the target driver's QFULL handling
5438 * to kick in. We do this by having pkt_reason
5439 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5440 */
5441 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5442 } else {
5443 if (ptgt->m_reset_delay == 0) {
5444 ptgt->m_t_throttle =
5445 max((ptgt->m_t_ncmds - 2), 0);
5446 }
5447
5448 cmd->cmd_pkt_flags |= FLAG_HEAD;
5449 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5450 cmd->cmd_flags |= CFLAG_RETRY;
5451
5452 (void) mptsas_accept_pkt(mpt, cmd);
5453
5454 /*
5455 * when target gives queue full status with no commands
5456 * outstanding (m_t_ncmds == 0), throttle is set to 0
5457 * (HOLD_THROTTLE), and the queue full handling start
5458 * (see psarc/1994/313); if there are commands outstanding,
5459 * throttle is set to (m_t_ncmds - 2)
5460 */
5461 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5462 /*
5463 * By setting throttle to QFULL_THROTTLE, we
5464 * avoid submitting new commands and in
5465 * mptsas_restart_cmd find out slots which need
5466 * their throttles to be cleared.
5467 */
5468 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5469 if (mpt->m_restart_cmd_timeid == 0) {
5470 mpt->m_restart_cmd_timeid =
5471 timeout(mptsas_restart_cmd, mpt,
5472 ptgt->m_qfull_retry_interval);
5473 }
5474 }
5475 }
5476 }
5477
5478 mptsas_phymask_t
5479 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5480 {
5481 mptsas_phymask_t phy_mask = 0;
5482 uint8_t i = 0;
5483
5484 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5485
5486 ASSERT(mutex_owned(&mpt->m_mutex));
5487
5488 /*
5489 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5490 */
5491 if (physport == 0xFF) {
5492 return (0);
5493 }
5494
5495 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5496 if (mpt->m_phy_info[i].attached_devhdl &&
5497 (mpt->m_phy_info[i].phy_mask != 0) &&
5498 (mpt->m_phy_info[i].port_num == physport)) {
5499 phy_mask = mpt->m_phy_info[i].phy_mask;
5500 break;
5501 }
5502 }
5503 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5504 mpt->m_instance, physport, phy_mask));
5505 return (phy_mask);
5506 }
5507
5508 /*
5509 * mpt free device handle after device gone, by use of passthrough
5510 */
5511 static int
5512 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5513 {
5514 Mpi2SasIoUnitControlRequest_t req;
5515 Mpi2SasIoUnitControlReply_t rep;
5516 int ret;
5517
5518 ASSERT(mutex_owned(&mpt->m_mutex));
5519
5520 /*
5521 * Need to compose a SAS IO Unit Control request message
5522 * and call mptsas_do_passthru() function
5523 */
5524 bzero(&req, sizeof (req));
5525 bzero(&rep, sizeof (rep));
5526
5527 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5528 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5529 req.DevHandle = LE_16(devhdl);
5530
5531 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5532 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5533 if (ret != 0) {
5534 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5535 "Control error %d", ret);
5536 return (DDI_FAILURE);
5537 }
5538
5539 /* do passthrough success, check the ioc status */
5540 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5541 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5542 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5543 return (DDI_FAILURE);
5544 }
5545
5546 return (DDI_SUCCESS);
5547 }
5548
5549 static void
5550 mptsas_update_phymask(mptsas_t *mpt)
5551 {
5552 mptsas_phymask_t mask = 0, phy_mask;
5553 char *phy_mask_name;
5554 uint8_t current_port;
5555 int i, j;
5556
5557 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5558
5559 ASSERT(mutex_owned(&mpt->m_mutex));
5560
5561 (void) mptsas_get_sas_io_unit_page(mpt);
5562
5563 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5564
5565 for (i = 0; i < mpt->m_num_phys; i++) {
5566 phy_mask = 0x00;
5567
5568 if (mpt->m_phy_info[i].attached_devhdl == 0)
5569 continue;
5570
5571 bzero(phy_mask_name, sizeof (phy_mask_name));
5572
5573 current_port = mpt->m_phy_info[i].port_num;
5574
5575 if ((mask & (1 << i)) != 0)
5576 continue;
5577
5578 for (j = 0; j < mpt->m_num_phys; j++) {
5579 if (mpt->m_phy_info[j].attached_devhdl &&
5580 (mpt->m_phy_info[j].port_num == current_port)) {
5581 phy_mask |= (1 << j);
5582 }
5583 }
5584 mask = mask | phy_mask;
5585
5586 for (j = 0; j < mpt->m_num_phys; j++) {
5587 if ((phy_mask >> j) & 0x01) {
5588 mpt->m_phy_info[j].phy_mask = phy_mask;
5589 }
5590 }
5591
5592 (void) sprintf(phy_mask_name, "%x", phy_mask);
5593
5594 mutex_exit(&mpt->m_mutex);
5595 /*
5596 * register a iport, if the port has already been existed
5597 * SCSA will do nothing and just return.
5598 */
5599 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5600 mutex_enter(&mpt->m_mutex);
5601 }
5602 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5603 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
5604 }
5605
5606 /*
5607 * mptsas_handle_dr is a task handler for DR, the DR action includes:
5608 * 1. Directly attched Device Added/Removed.
5609 * 2. Expander Device Added/Removed.
5610 * 3. Indirectly Attached Device Added/Expander.
5611 * 4. LUNs of a existing device status change.
5612 * 5. RAID volume created/deleted.
5613 * 6. Member of RAID volume is released because of RAID deletion.
5614 * 7. Physical disks are removed because of RAID creation.
5615 */
static void
mptsas_handle_dr(void *args) {
	mptsas_topo_change_list_t	*topo_node = NULL;
	mptsas_topo_change_list_t	*save_node = NULL;
	mptsas_t			*mpt;
	dev_info_t			*parent = NULL;
	mptsas_phymask_t		phymask = 0;
	char				*phy_mask_name;
	uint8_t				flags = 0, physport = 0xff;
	uint8_t				port_update = 0;
	uint_t				event;

	topo_node = (mptsas_topo_change_list_t *)args;

	mpt = topo_node->mpt;
	event = topo_node->event;
	flags = topo_node->flags;

	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);

	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));

	/*
	 * Decide, based on the head node's event/flags, whether the
	 * initiator port layout may have changed and a phymask refresh
	 * is needed before walking the topology list.
	 */
	switch (event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Direct attached or expander attached device added
			 * into system or a Phys Disk that is being unhidden.
			 */
			port_update = 1;
		}
		break;
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
		/*
		 * New expander added into system, it must be the head
		 * of topo_change_list_t
		 */
		port_update = 1;
		break;
	default:
		port_update = 0;
		break;
	}
	/*
	 * All cases port_update == 1 may cause initiator port form change
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_port_chng && port_update) {
		/*
		 * mpt->m_port_chng flag indicates some PHYs of initiator
		 * port have changed to online. So when expander added or
		 * directly attached device online event come, we force to
		 * update port information by issueing SAS IO Unit Page and
		 * update PHYMASKs.
		 */
		(void) mptsas_update_phymask(mpt);
		mpt->m_port_chng = 0;

	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * Walk the topology change list.  Each iteration processes and
	 * frees one node; `parent' caches the iport dip across nodes that
	 * share it (expander-attached devices).
	 */
	while (topo_node) {
		phymask = 0;
		if (parent == NULL) {
			physport = topo_node->un.physport;
			event = topo_node->event;
			flags = topo_node->flags;
			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
				/*
				 * For all offline events, phymask is known
				 */
				phymask = topo_node->un.phymask;
				goto find_parent;
			}
			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
				goto handle_topo_change;
			}
			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
				phymask = topo_node->un.phymask;
				goto find_parent;
			}

			if ((flags ==
			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
				/*
				 * There is no any field in IR_CONFIG_CHANGE
				 * event indicate physport/phynum, let's get
				 * parent after SAS Device Page0 request.
				 */
				goto handle_topo_change;
			}

			mutex_enter(&mpt->m_mutex);
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				/*
				 * If the direct attached device added or a
				 * phys disk is being unhidden, argument
				 * physport actually is PHY#, so we have to get
				 * phymask according PHY#.
				 */
				physport = mpt->m_phy_info[physport].port_num;
			}

			/*
			 * Translate physport to phymask so that we can search
			 * parent dip.
			 */
			phymask = mptsas_physport_to_phymask(mpt,
			    physport);
			mutex_exit(&mpt->m_mutex);

find_parent:
			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
			/*
			 * For RAID topology change node, write the iport name
			 * as v0.
			 */
			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
				(void) sprintf(phy_mask_name, "v0");
			} else {
				/*
				 * phymask can bo 0 if the drive has been
				 * pulled by the time an add event is
				 * processed. If phymask is 0, just skip this
				 * event and continue.
				 */
				if (phymask == 0) {
					/* Free the skipped node and advance. */
					mutex_enter(&mpt->m_mutex);
					save_node = topo_node;
					topo_node = topo_node->next;
					ASSERT(save_node);
					kmem_free(save_node,
					    sizeof (mptsas_topo_change_list_t));
					mutex_exit(&mpt->m_mutex);

					parent = NULL;
					continue;
				}
				(void) sprintf(phy_mask_name, "%x", phymask);
			}
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find an "
				    "iport, should not happen!");
				goto out;
			}

		}
		ASSERT(parent);
handle_topo_change:

		mutex_enter(&mpt->m_mutex);
		/*
		 * If HBA is being reset, don't perform operations depending
		 * on the IOC. We must free the topo list, however.
		 */
		if (!mpt->m_in_reset)
			mptsas_handle_topo_change(topo_node, parent);
		else
			NDBG20(("skipping topo change received during reset"));
		/* Free the processed node and advance to the next one. */
		save_node = topo_node;
		topo_node = topo_node->next;
		ASSERT(save_node);
		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
		mutex_exit(&mpt->m_mutex);

		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
			/*
			 * If direct attached device associated, make sure
			 * reset the parent before start the next one. But
			 * all devices associated with expander shares the
			 * parent. Also, reset parent if this is for RAID.
			 */
			parent = NULL;
		}
	}
out:
	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
}
5801
/*
 * Process a single topology change node: online/offline a target or SMP
 * (expander), or just free a device handle.  Called with m_mutex held;
 * the mutex is dropped around NDI/SCSA operations and re-taken before
 * returning (except on the early-return error paths after mutex_exit,
 * which return with the state the illumos framework expects for taskq
 * context).
 */
static void
mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
    dev_info_t *parent)
{
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp = NULL;
	mptsas_t	*mpt = (void *)topo_node->mpt;
	uint16_t	devhdl;
	uint16_t	attached_devhdl;
	uint64_t	sas_wwn = 0;
	int		rval = 0;
	uint32_t	page_address;
	uint8_t		phy, flags;
	char		*addr = NULL;
	dev_info_t	*lundip;
	int		circ = 0, circ1 = 0;
	char		attached_wwnstr[MPTSAS_WWN_STRLEN];

	NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));

	ASSERT(mutex_owned(&mpt->m_mutex));

	switch (topo_node->event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
	{
		char *phy_mask_name;
		mptsas_phymask_t phymask = 0;

		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/*
			 * Get latest RAID info.
			 */
			(void) mptsas_get_raid_info(mpt);
			ptgt = mptsas_search_by_devhdl(
			    &mpt->m_active->m_tgttbl, topo_node->devhdl);
			if (ptgt == NULL)
				break;
		} else {
			ptgt = (void *)topo_node->object;
		}

		if (ptgt == NULL) {
			/*
			 * If a Phys Disk was deleted, RAID info needs to be
			 * updated to reflect the new topology.
			 */
			(void) mptsas_get_raid_info(mpt);

			/*
			 * Get sas device page 0 by DevHandle to make sure if
			 * SSP/SATA end device exist.
			 */
			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
			    topo_node->devhdl;

			rval = mptsas_get_target_device_info(mpt, page_address,
			    &devhdl, &ptgt);
			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: target %d is "
				    "not a SAS/SATA device. \n",
				    topo_node->devhdl);
			} else if (rval == DEV_INFO_FAIL_ALLOC) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "allocate memory. \n");
			}
			/*
			 * If rval is DEV_INFO_PHYS_DISK than there is nothing
			 * else to do, just leave.
			 */
			if (rval != DEV_INFO_SUCCESS) {
				return;
			}
		}

		ASSERT(ptgt->m_devhdl == topo_node->devhdl);

		/* Bus config work below must run without m_mutex held. */
		mutex_exit(&mpt->m_mutex);
		flags = topo_node->flags;

		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
			/*
			 * A newly-unhidden Phys Disk: look up its iport dip
			 * by phymask since the caller could not supply one.
			 */
			phymask = ptgt->m_phymask;
			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
			(void) sprintf(phy_mask_name, "%x", phymask);
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find a "
				    "iport for PD, should not happen!");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			ndi_devi_enter(parent, &circ1);
			(void) mptsas_config_raid(parent, topo_node->devhdl,
			    &lundip);
			ndi_devi_exit(parent, circ1);
		} else {
			/*
			 * hold nexus for bus configure
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ);
			ndi_devi_enter(parent, &circ1);
			rval = mptsas_config_target(parent, ptgt);
			/*
			 * release nexus for bus configure
			 */
			ndi_devi_exit(parent, circ1);
			ndi_devi_exit(scsi_vhci_dip, circ);

			/*
			 * Add parent's props for SMHBA support
			 */
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				bzero(attached_wwnstr,
				    sizeof (attached_wwnstr));
				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
				    ptgt->m_sas_wwn);
				if (ddi_prop_update_string(DDI_DEV_T_NONE,
				    parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT,
				    attached_wwnstr)
				    != DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent,
					    SCSI_ADDR_PROP_ATTACHED_PORT);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    "attached-port props");
					return;
				}
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS, 1) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_NUM_PHYS);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    " create num-phys props");
					return;
				}

				/*
				 * Update PHY info for smhba
				 */
				mutex_enter(&mpt->m_mutex);
				if (mptsas_smhba_phy_init(mpt)) {
					mutex_exit(&mpt->m_mutex);
					mptsas_log(mpt, CE_WARN, "mptsas phy"
					    " update failed");
					return;
				}
				mutex_exit(&mpt->m_mutex);
				/*
				 * NOTE(review): attached_devhdl is not
				 * assigned before its address is passed
				 * here; presumably
				 * mptsas_smhba_set_phy_props() fills it in
				 * as an output parameter -- confirm against
				 * that function's definition.
				 */
				mptsas_smhba_set_phy_props(mpt,
				    ddi_get_name_addr(parent), parent,
				    1, &attached_devhdl);
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT, 0) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_VIRTUAL_PORT);
					mptsas_log(mpt, CE_WARN,
					    "mptsas virtual-port"
					    "port prop update failed");
					return;
				}
			}
		}
		mutex_enter(&mpt->m_mutex);

		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
		    ptgt->m_phymask));
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
	{
		mptsas_hash_table_t *tgttbl = &mpt->m_active->m_tgttbl;
		devhdl = topo_node->devhdl;
		ptgt = mptsas_search_by_devhdl(tgttbl, devhdl);
		if (ptgt == NULL)
			break;

		sas_wwn = ptgt->m_sas_wwn;
		phy = ptgt->m_phynum;

		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

		/* Unit address: WWN if we have one, otherwise PHY number. */
		if (sas_wwn) {
			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
		} else {
			(void) sprintf(addr, "p%x", phy);
		}
		ASSERT(ptgt->m_devhdl == devhdl);

		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
		    (topo_node->flags ==
		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Get latest RAID info if RAID volume status changes
			 * or Phys Disk status changes
			 */
			(void) mptsas_get_raid_info(mpt);
		}
		/*
		 * Abort all outstanding command on the device
		 */
		rval = mptsas_do_scsi_reset(mpt, devhdl);
		if (rval) {
			NDBG20(("mptsas%d handle_topo_change to reset target "
			    "before offline devhdl:%x, phymask:%x, rval:%x",
			    mpt->m_instance, ptgt->m_devhdl, ptgt->m_phymask,
			    rval));
		}

		mutex_exit(&mpt->m_mutex);

		/* Offline the child node under the nexus locks. */
		ndi_devi_enter(scsi_vhci_dip, &circ);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_target(parent, addr);
		ndi_devi_exit(parent, circ1);
		ndi_devi_exit(scsi_vhci_dip, circ);
		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
		    "phymask:%x, rval:%x", mpt->m_instance,
		    ptgt->m_devhdl, ptgt->m_phymask, rval));

		kmem_free(addr, SCSI_MAXNAMELEN);

		/*
		 * Clear parent's props for SMHBA support
		 */
		flags = topo_node->flags;
		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				break;
			}
		}

		mutex_enter(&mpt->m_mutex);
		if (rval == DDI_SUCCESS) {
			/* Offline succeeded; release the target structure. */
			mptsas_tgt_free(&mpt->m_active->m_tgttbl,
			    ptgt->m_sas_wwn, ptgt->m_phymask);
			ptgt = NULL;
		} else {
			/*
			 * clean DR_INTRANSITION flag to allow I/O down to
			 * PHCI driver since failover finished.
			 * Invalidate the devhdl
			 */
			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
			ptgt->m_tgt_unconfigured = 0;
			mutex_enter(&mpt->m_tx_waitq_mutex);
			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
			mutex_exit(&mpt->m_tx_waitq_mutex);
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
			rval = mptsas_free_devhdl(mpt, devhdl);

			NDBG20(("mptsas%d handle_topo_change to remove "
			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
			    rval));
		}

		break;
	}
	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
	{
		devhdl = topo_node->devhdl;
		/*
		 * If this is the remove handle event, do a reset first.
		 */
		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
			rval = mptsas_do_scsi_reset(mpt, devhdl);
			if (rval) {
				NDBG20(("mpt%d reset target before remove "
				    "devhdl:%x, rval:%x", mpt->m_instance,
				    devhdl, rval));
			}
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		rval = mptsas_free_devhdl(mpt, devhdl);
		NDBG20(("mptsas%d handle_topo_change to remove "
		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
		    rval));
		break;
	}
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
	{
		mptsas_smp_t smp;
		dev_info_t *smpdip;
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;

		devhdl = topo_node->devhdl;

		/* Fetch expander page 0 to validate and describe the SMP. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
		if (rval != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "failed to online smp, "
			    "handle %x", devhdl);
			return;
		}

		psmp = mptsas_smp_alloc(smptbl, &smp);
		if (psmp == NULL) {
			return;
		}

		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		(void) mptsas_online_smp(parent, psmp, &smpdip);
		ndi_devi_exit(parent, circ1);

		mutex_enter(&mpt->m_mutex);
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_SMP:
	{
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
		devhdl = topo_node->devhdl;
		uint32_t dev_info;

		psmp = mptsas_search_by_devhdl(smptbl, devhdl);
		if (psmp == NULL)
			break;
		/*
		 * The mptsas_smp_t data is released only if the dip is offlined
		 * successfully.
		 */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
		ndi_devi_exit(parent, circ1);

		/* Direct-attached expander: clear the iport's SMHBA props. */
		dev_info = psmp->m_deviceinfo;
		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
		    DEVINFO_DIRECT_ATTACHED) {
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				return;
			}
			/*
			 * Check whether the smp connected to the iport,
			 */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys"
				    "prop update failed");
				return;
			}
			/*
			 * Clear parent's attached-port props
			 */
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				return;
			}
		}

		mutex_enter(&mpt->m_mutex);
		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
		if (rval == DDI_SUCCESS) {
			mptsas_smp_free(smptbl, psmp->m_sasaddr,
			    psmp->m_phymask);
		} else {
			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		}

		bzero(attached_wwnstr, sizeof (attached_wwnstr));

		break;
	}
	default:
		return;
	}
}
6230
6231 /*
6232 * Record the event if its type is enabled in mpt instance by ioctl.
6233 */
static void
mptsas_record_event(void *args)
{
	m_replyh_arg_t			*replyh_arg;
	pMpi2EventNotificationReply_t	eventreply;
	uint32_t			event, rfm;
	mptsas_t			*mpt;
	int				i, j;
	uint16_t			event_data_len;
	boolean_t			sendAEN = FALSE;

	replyh_arg = (m_replyh_arg_t *)args;
	rfm = replyh_arg->rfm;
	mpt = replyh_arg->mpt;

	/* Locate the reply frame for this event in reply-frame DMA memory. */
	eventreply = (pMpi2EventNotificationReply_t)
	    (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);


	/*
	 * Generate a system event to let anyone who cares know that a
	 * LOG_ENTRY_ADDED event has occurred.  This is sent no matter what the
	 * event mask is set to.
	 */
	if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
		sendAEN = TRUE;
	}

	/*
	 * Record the event only if it is not masked.  Determine which dword
	 * and bit of event mask to test.
	 */
	i = (uint8_t)(event / 32);
	j = (uint8_t)(event % 32);
	if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
		i = mpt->m_event_index;
		mpt->m_events[i].Type = event;
		mpt->m_events[i].Number = ++mpt->m_event_number;
		bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
		event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &eventreply->EventDataLength);

		/*
		 * NOTE(review): when event_data_len == 0 the entry's Type and
		 * Number above are still written but m_event_index does NOT
		 * advance, so a subsequent event overwrites that entry.
		 * Presumably intentional (data-less events are not kept) --
		 * confirm before relying on the event ring contents.
		 */
		if (event_data_len > 0) {
			/*
			 * Limit data to size in m_event entry
			 */
			if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
				event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
			}
			for (j = 0; j < event_data_len; j++) {
				mpt->m_events[i].Data[j] =
				    ddi_get32(mpt->m_acc_reply_frame_hdl,
				    &(eventreply->EventData[j]));
			}

			/*
			 * check for index wrap-around
			 */
			if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
				i = 0;
			}
			mpt->m_event_index = (uint8_t)i;

			/*
			 * Set flag to send the event.
			 */
			sendAEN = TRUE;
		}
	}

	/*
	 * Generate a system event if flag is set to let anyone who cares know
	 * that an event has occurred.
	 */
	if (sendAEN) {
		(void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
		    "SAS", NULL, NULL, DDI_NOSLEEP);
	}
}
6314
6315 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6316 /*
6317 * handle sync events from ioc in interrupt
6318 * return value:
6319 * DDI_SUCCESS: The event is handled by this func
6320 * DDI_FAILURE: Event is not handled
6321 */
6322 static int
6323 mptsas_handle_event_sync(void *args)
6324 {
6325 m_replyh_arg_t *replyh_arg;
6326 pMpi2EventNotificationReply_t eventreply;
6327 uint32_t event, rfm;
6328 mptsas_t *mpt;
6329 uint_t iocstatus;
6330
6331 replyh_arg = (m_replyh_arg_t *)args;
6332 rfm = replyh_arg->rfm;
6333 mpt = replyh_arg->mpt;
6334
6335 ASSERT(mutex_owned(&mpt->m_mutex));
6336
6337 eventreply = (pMpi2EventNotificationReply_t)
6338 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6339 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6340
6341 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6342 &eventreply->IOCStatus)) {
6343 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6344 mptsas_log(mpt, CE_WARN,
6345 "!mptsas_handle_event_sync: IOCStatus=0x%x, "
6346 "IOCLogInfo=0x%x", iocstatus,
6347 ddi_get32(mpt->m_acc_reply_frame_hdl,
6348 &eventreply->IOCLogInfo));
6349 } else {
6350 mptsas_log(mpt, CE_WARN,
6351 "mptsas_handle_event_sync: IOCStatus=0x%x, "
6352 "IOCLogInfo=0x%x", iocstatus,
6353 ddi_get32(mpt->m_acc_reply_frame_hdl,
6354 &eventreply->IOCLogInfo));
6355 }
6356 }
6357
6358 /*
6359 * figure out what kind of event we got and handle accordingly
6360 */
6361 switch (event) {
6362 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6363 {
6364 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
6365 uint8_t num_entries, expstatus, phy;
6366 uint8_t phystatus, physport, state, i;
6367 uint8_t start_phy_num, link_rate;
6368 uint16_t dev_handle, reason_code;
6369 uint16_t enc_handle, expd_handle;
6370 char string[80], curr[80], prev[80];
6371 mptsas_topo_change_list_t *topo_head = NULL;
6372 mptsas_topo_change_list_t *topo_tail = NULL;
6373 mptsas_topo_change_list_t *topo_node = NULL;
6374 mptsas_target_t *ptgt;
6375 mptsas_smp_t *psmp;
6376 mptsas_hash_table_t *tgttbl, *smptbl;
6377 uint8_t flags = 0, exp_flag;
6378 smhba_info_t *pSmhba = NULL;
6379
6380 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6381
6382 tgttbl = &mpt->m_active->m_tgttbl;
6383 smptbl = &mpt->m_active->m_smptbl;
6384
6385 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6386 eventreply->EventData;
6387
6388 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6389 &sas_topo_change_list->EnclosureHandle);
6390 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6391 &sas_topo_change_list->ExpanderDevHandle);
6392 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6393 &sas_topo_change_list->NumEntries);
6394 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6395 &sas_topo_change_list->StartPhyNum);
6396 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6397 &sas_topo_change_list->ExpStatus);
6398 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6399 &sas_topo_change_list->PhysicalPort);
6400
6401 string[0] = 0;
6402 if (expd_handle) {
6403 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6404 switch (expstatus) {
6405 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6406 (void) sprintf(string, " added");
6407 /*
6408 * New expander device added
6409 */
6410 mpt->m_port_chng = 1;
6411 topo_node = kmem_zalloc(
6412 sizeof (mptsas_topo_change_list_t),
6413 KM_SLEEP);
6414 topo_node->mpt = mpt;
6415 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6416 topo_node->un.physport = physport;
6417 topo_node->devhdl = expd_handle;
6418 topo_node->flags = flags;
6419 topo_node->object = NULL;
6420 if (topo_head == NULL) {
6421 topo_head = topo_tail = topo_node;
6422 } else {
6423 topo_tail->next = topo_node;
6424 topo_tail = topo_node;
6425 }
6426 break;
6427 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6428 (void) sprintf(string, " not responding, "
6429 "removed");
6430 psmp = mptsas_search_by_devhdl(smptbl,
6431 expd_handle);
6432 if (psmp == NULL)
6433 break;
6434
6435 topo_node = kmem_zalloc(
6436 sizeof (mptsas_topo_change_list_t),
6437 KM_SLEEP);
6438 topo_node->mpt = mpt;
6439 topo_node->un.phymask = psmp->m_phymask;
6440 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6441 topo_node->devhdl = expd_handle;
6442 topo_node->flags = flags;
6443 topo_node->object = NULL;
6444 if (topo_head == NULL) {
6445 topo_head = topo_tail = topo_node;
6446 } else {
6447 topo_tail->next = topo_node;
6448 topo_tail = topo_node;
6449 }
6450 break;
6451 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6452 break;
6453 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6454 (void) sprintf(string, " not responding, "
6455 "delaying removal");
6456 break;
6457 default:
6458 break;
6459 }
6460 } else {
6461 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6462 }
6463
6464 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6465 enc_handle, expd_handle, string));
6466 for (i = 0; i < num_entries; i++) {
6467 phy = i + start_phy_num;
6468 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6469 &sas_topo_change_list->PHY[i].PhyStatus);
6470 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6471 &sas_topo_change_list->PHY[i].AttachedDevHandle);
6472 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
6473 /*
6474 * Filter out processing of Phy Vacant Status unless
6475 * the reason code is "Not Responding". Process all
6476 * other combinations of Phy Status and Reason Codes.
6477 */
6478 if ((phystatus &
6479 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
6480 (reason_code !=
6481 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
6482 continue;
6483 }
6484 curr[0] = 0;
6485 prev[0] = 0;
6486 string[0] = 0;
6487 switch (reason_code) {
6488 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6489 {
6490 NDBG20(("mptsas%d phy %d physical_port %d "
6491 "dev_handle %d added", mpt->m_instance, phy,
6492 physport, dev_handle));
6493 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6494 &sas_topo_change_list->PHY[i].LinkRate);
6495 state = (link_rate &
6496 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6497 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6498 switch (state) {
6499 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6500 (void) sprintf(curr, "is disabled");
6501 break;
6502 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6503 (void) sprintf(curr, "is offline, "
6504 "failed speed negotiation");
6505 break;
6506 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6507 (void) sprintf(curr, "SATA OOB "
6508 "complete");
6509 break;
6510 case SMP_RESET_IN_PROGRESS:
6511 (void) sprintf(curr, "SMP reset in "
6512 "progress");
6513 break;
6514 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6515 (void) sprintf(curr, "is online at "
6516 "1.5 Gbps");
6517 break;
6518 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6519 (void) sprintf(curr, "is online at 3.0 "
6520 "Gbps");
6521 break;
6522 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6523 (void) sprintf(curr, "is online at 6.0 "
6524 "Gbps");
6525 break;
6526 default:
6527 (void) sprintf(curr, "state is "
6528 "unknown");
6529 break;
6530 }
6531 /*
6532 * New target device added into the system.
6533 * Set association flag according to if an
6534 * expander is used or not.
6535 */
6536 exp_flag =
6537 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6538 if (flags ==
6539 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6540 flags = exp_flag;
6541 }
6542 topo_node = kmem_zalloc(
6543 sizeof (mptsas_topo_change_list_t),
6544 KM_SLEEP);
6545 topo_node->mpt = mpt;
6546 topo_node->event =
6547 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6548 if (expd_handle == 0) {
6549 /*
6550 * Per MPI 2, if expander dev handle
6551 * is 0, it's a directly attached
6552 * device. So driver use PHY to decide
6553 * which iport is associated
6554 */
6555 physport = phy;
6556 mpt->m_port_chng = 1;
6557 }
6558 topo_node->un.physport = physport;
6559 topo_node->devhdl = dev_handle;
6560 topo_node->flags = flags;
6561 topo_node->object = NULL;
6562 if (topo_head == NULL) {
6563 topo_head = topo_tail = topo_node;
6564 } else {
6565 topo_tail->next = topo_node;
6566 topo_tail = topo_node;
6567 }
6568 break;
6569 }
6570 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6571 {
6572 NDBG20(("mptsas%d phy %d physical_port %d "
6573 "dev_handle %d removed", mpt->m_instance,
6574 phy, physport, dev_handle));
6575 /*
6576 * Set association flag according to if an
6577 * expander is used or not.
6578 */
6579 exp_flag =
6580 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6581 if (flags ==
6582 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6583 flags = exp_flag;
6584 }
6585 /*
6586 * Target device is removed from the system
6587 * Before the device is really offline from
6588 * from system.
6589 */
6590 ptgt = mptsas_search_by_devhdl(tgttbl,
6591 dev_handle);
6592 /*
6593 * If ptgt is NULL here, it means that the
6594 * DevHandle is not in the hash table. This is
6595 * reasonable sometimes. For example, if a
6596 * disk was pulled, then added, then pulled
6597 * again, the disk will not have been put into
6598 * the hash table because the add event will
6599 * have an invalid phymask. BUT, this does not
6600 * mean that the DevHandle is invalid. The
6601 * controller will still have a valid DevHandle
6602 * that must be removed. To do this, use the
6603 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
6604 */
6605 if (ptgt == NULL) {
6606 topo_node = kmem_zalloc(
6607 sizeof (mptsas_topo_change_list_t),
6608 KM_SLEEP);
6609 topo_node->mpt = mpt;
6610 topo_node->un.phymask = 0;
6611 topo_node->event =
6612 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
6613 topo_node->devhdl = dev_handle;
6614 topo_node->flags = flags;
6615 topo_node->object = NULL;
6616 if (topo_head == NULL) {
6617 topo_head = topo_tail =
6618 topo_node;
6619 } else {
6620 topo_tail->next = topo_node;
6621 topo_tail = topo_node;
6622 }
6623 break;
6624 }
6625
6626 /*
6627 * Update DR flag immediately avoid I/O failure
6628 * before failover finish. Pay attention to the
6629 * mutex protect, we need grab m_tx_waitq_mutex
6630 * during set m_dr_flag because we won't add
6631 * the following command into waitq, instead,
6632 * we need return TRAN_BUSY in the tran_start
6633 * context.
6634 */
6635 mutex_enter(&mpt->m_tx_waitq_mutex);
6636 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6637 mutex_exit(&mpt->m_tx_waitq_mutex);
6638
6639 topo_node = kmem_zalloc(
6640 sizeof (mptsas_topo_change_list_t),
6641 KM_SLEEP);
6642 topo_node->mpt = mpt;
6643 topo_node->un.phymask = ptgt->m_phymask;
6644 topo_node->event =
6645 MPTSAS_DR_EVENT_OFFLINE_TARGET;
6646 topo_node->devhdl = dev_handle;
6647 topo_node->flags = flags;
6648 topo_node->object = NULL;
6649 if (topo_head == NULL) {
6650 topo_head = topo_tail = topo_node;
6651 } else {
6652 topo_tail->next = topo_node;
6653 topo_tail = topo_node;
6654 }
6655 break;
6656 }
6657 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6658 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6659 &sas_topo_change_list->PHY[i].LinkRate);
6660 state = (link_rate &
6661 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6662 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6663 pSmhba = &mpt->m_phy_info[i].smhba_info;
6664 pSmhba->negotiated_link_rate = state;
6665 switch (state) {
6666 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6667 (void) sprintf(curr, "is disabled");
6668 mptsas_smhba_log_sysevent(mpt,
6669 ESC_SAS_PHY_EVENT,
6670 SAS_PHY_REMOVE,
6671 &mpt->m_phy_info[i].smhba_info);
6672 mpt->m_phy_info[i].smhba_info.
6673 negotiated_link_rate
6674 = 0x1;
6675 break;
6676 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6677 (void) sprintf(curr, "is offline, "
6678 "failed speed negotiation");
6679 mptsas_smhba_log_sysevent(mpt,
6680 ESC_SAS_PHY_EVENT,
6681 SAS_PHY_OFFLINE,
6682 &mpt->m_phy_info[i].smhba_info);
6683 break;
6684 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6685 (void) sprintf(curr, "SATA OOB "
6686 "complete");
6687 break;
6688 case SMP_RESET_IN_PROGRESS:
6689 (void) sprintf(curr, "SMP reset in "
6690 "progress");
6691 break;
6692 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6693 (void) sprintf(curr, "is online at "
6694 "1.5 Gbps");
6695 if ((expd_handle == 0) &&
6696 (enc_handle == 1)) {
6697 mpt->m_port_chng = 1;
6698 }
6699 mptsas_smhba_log_sysevent(mpt,
6700 ESC_SAS_PHY_EVENT,
6701 SAS_PHY_ONLINE,
6702 &mpt->m_phy_info[i].smhba_info);
6703 break;
6704 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6705 (void) sprintf(curr, "is online at 3.0 "
6706 "Gbps");
6707 if ((expd_handle == 0) &&
6708 (enc_handle == 1)) {
6709 mpt->m_port_chng = 1;
6710 }
6711 mptsas_smhba_log_sysevent(mpt,
6712 ESC_SAS_PHY_EVENT,
6713 SAS_PHY_ONLINE,
6714 &mpt->m_phy_info[i].smhba_info);
6715 break;
6716 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6717 (void) sprintf(curr, "is online at "
6718 "6.0 Gbps");
6719 if ((expd_handle == 0) &&
6720 (enc_handle == 1)) {
6721 mpt->m_port_chng = 1;
6722 }
6723 mptsas_smhba_log_sysevent(mpt,
6724 ESC_SAS_PHY_EVENT,
6725 SAS_PHY_ONLINE,
6726 &mpt->m_phy_info[i].smhba_info);
6727 break;
6728 default:
6729 (void) sprintf(curr, "state is "
6730 "unknown");
6731 break;
6732 }
6733
6734 state = (link_rate &
6735 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
6736 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
6737 switch (state) {
6738 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6739 (void) sprintf(prev, ", was disabled");
6740 break;
6741 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6742 (void) sprintf(prev, ", was offline, "
6743 "failed speed negotiation");
6744 break;
6745 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6746 (void) sprintf(prev, ", was SATA OOB "
6747 "complete");
6748 break;
6749 case SMP_RESET_IN_PROGRESS:
6750 (void) sprintf(prev, ", was SMP reset "
6751 "in progress");
6752 break;
6753 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6754 (void) sprintf(prev, ", was online at "
6755 "1.5 Gbps");
6756 break;
6757 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6758 (void) sprintf(prev, ", was online at "
6759 "3.0 Gbps");
6760 break;
6761 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6762 (void) sprintf(prev, ", was online at "
6763 "6.0 Gbps");
6764 break;
6765 default:
6766 break;
6767 }
6768 (void) sprintf(&string[strlen(string)], "link "
6769 "changed, ");
6770 break;
6771 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6772 continue;
6773 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6774 (void) sprintf(&string[strlen(string)],
6775 "target not responding, delaying "
6776 "removal");
6777 break;
6778 }
6779 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
6780 mpt->m_instance, phy, dev_handle, string, curr,
6781 prev));
6782 }
6783 if (topo_head != NULL) {
6784 /*
6785 * Launch DR taskq to handle topology change
6786 */
6787 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
6788 mptsas_handle_dr, (void *)topo_head,
6789 DDI_NOSLEEP)) != DDI_SUCCESS) {
6790 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
6791 "for handle SAS DR event failed. \n");
6792 }
6793 }
6794 break;
6795 }
6796 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
6797 {
6798 Mpi2EventDataIrConfigChangeList_t *irChangeList;
6799 mptsas_topo_change_list_t *topo_head = NULL;
6800 mptsas_topo_change_list_t *topo_tail = NULL;
6801 mptsas_topo_change_list_t *topo_node = NULL;
6802 mptsas_target_t *ptgt;
6803 mptsas_hash_table_t *tgttbl;
6804 uint8_t num_entries, i, reason;
6805 uint16_t volhandle, diskhandle;
6806
6807 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
6808 eventreply->EventData;
6809 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6810 &irChangeList->NumElements);
6811
6812 tgttbl = &mpt->m_active->m_tgttbl;
6813
6814 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
6815 mpt->m_instance));
6816
6817 for (i = 0; i < num_entries; i++) {
6818 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
6819 &irChangeList->ConfigElement[i].ReasonCode);
6820 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6821 &irChangeList->ConfigElement[i].VolDevHandle);
6822 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6823 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
6824
6825 switch (reason) {
6826 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
6827 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
6828 {
6829 NDBG20(("mptsas %d volume added\n",
6830 mpt->m_instance));
6831
6832 topo_node = kmem_zalloc(
6833 sizeof (mptsas_topo_change_list_t),
6834 KM_SLEEP);
6835
6836 topo_node->mpt = mpt;
6837 topo_node->event =
6838 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6839 topo_node->un.physport = 0xff;
6840 topo_node->devhdl = volhandle;
6841 topo_node->flags =
6842 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
6843 topo_node->object = NULL;
6844 if (topo_head == NULL) {
6845 topo_head = topo_tail = topo_node;
6846 } else {
6847 topo_tail->next = topo_node;
6848 topo_tail = topo_node;
6849 }
6850 break;
6851 }
6852 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
6853 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
6854 {
6855 NDBG20(("mptsas %d volume deleted\n",
6856 mpt->m_instance));
6857 ptgt = mptsas_search_by_devhdl(tgttbl,
6858 volhandle);
6859 if (ptgt == NULL)
6860 break;
6861
6862 /*
6863 * Clear any flags related to volume
6864 */
6865 (void) mptsas_delete_volume(mpt, volhandle);
6866
6867 /*
6868 * Update DR flag immediately avoid I/O failure
6869 */
6870 mutex_enter(&mpt->m_tx_waitq_mutex);
6871 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6872 mutex_exit(&mpt->m_tx_waitq_mutex);
6873
6874 topo_node = kmem_zalloc(
6875 sizeof (mptsas_topo_change_list_t),
6876 KM_SLEEP);
6877 topo_node->mpt = mpt;
6878 topo_node->un.phymask = ptgt->m_phymask;
6879 topo_node->event =
6880 MPTSAS_DR_EVENT_OFFLINE_TARGET;
6881 topo_node->devhdl = volhandle;
6882 topo_node->flags =
6883 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
6884 topo_node->object = (void *)ptgt;
6885 if (topo_head == NULL) {
6886 topo_head = topo_tail = topo_node;
6887 } else {
6888 topo_tail->next = topo_node;
6889 topo_tail = topo_node;
6890 }
6891 break;
6892 }
6893 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
6894 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
6895 {
6896 ptgt = mptsas_search_by_devhdl(tgttbl,
6897 diskhandle);
6898 if (ptgt == NULL)
6899 break;
6900
6901 /*
6902 * Update DR flag immediately avoid I/O failure
6903 */
6904 mutex_enter(&mpt->m_tx_waitq_mutex);
6905 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
6906 mutex_exit(&mpt->m_tx_waitq_mutex);
6907
6908 topo_node = kmem_zalloc(
6909 sizeof (mptsas_topo_change_list_t),
6910 KM_SLEEP);
6911 topo_node->mpt = mpt;
6912 topo_node->un.phymask = ptgt->m_phymask;
6913 topo_node->event =
6914 MPTSAS_DR_EVENT_OFFLINE_TARGET;
6915 topo_node->devhdl = diskhandle;
6916 topo_node->flags =
6917 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
6918 topo_node->object = (void *)ptgt;
6919 if (topo_head == NULL) {
6920 topo_head = topo_tail = topo_node;
6921 } else {
6922 topo_tail->next = topo_node;
6923 topo_tail = topo_node;
6924 }
6925 break;
6926 }
6927 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
6928 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
6929 {
6930 /*
6931 * The physical drive is released by a IR
6932 * volume. But we cannot get the the physport
6933 * or phynum from the event data, so we only
6934 * can get the physport/phynum after SAS
6935 * Device Page0 request for the devhdl.
6936 */
6937 topo_node = kmem_zalloc(
6938 sizeof (mptsas_topo_change_list_t),
6939 KM_SLEEP);
6940 topo_node->mpt = mpt;
6941 topo_node->un.phymask = 0;
6942 topo_node->event =
6943 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6944 topo_node->devhdl = diskhandle;
6945 topo_node->flags =
6946 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
6947 topo_node->object = NULL;
6948 mpt->m_port_chng = 1;
6949 if (topo_head == NULL) {
6950 topo_head = topo_tail = topo_node;
6951 } else {
6952 topo_tail->next = topo_node;
6953 topo_tail = topo_node;
6954 }
6955 break;
6956 }
6957 default:
6958 break;
6959 }
6960 }
6961
6962 if (topo_head != NULL) {
6963 /*
6964 * Launch DR taskq to handle topology change
6965 */
6966 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
6967 mptsas_handle_dr, (void *)topo_head,
6968 DDI_NOSLEEP)) != DDI_SUCCESS) {
6969 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
6970 "for handle SAS DR event failed. \n");
6971 }
6972 }
6973 break;
6974 }
6975 default:
6976 return (DDI_FAILURE);
6977 }
6978
6979 return (DDI_SUCCESS);
6980 }
6981
6982 /*
6983 * handle events from ioc
6984 */
6985 static void
6986 mptsas_handle_event(void *args)
6987 {
6988 m_replyh_arg_t *replyh_arg;
6989 pMpi2EventNotificationReply_t eventreply;
6990 uint32_t event, iocloginfo, rfm;
6991 uint32_t status;
6992 uint8_t port;
6993 mptsas_t *mpt;
6994 uint_t iocstatus;
6995
6996 replyh_arg = (m_replyh_arg_t *)args;
6997 rfm = replyh_arg->rfm;
6998 mpt = replyh_arg->mpt;
6999
7000 mutex_enter(&mpt->m_mutex);
7001 /*
7002 * If HBA is being reset, drop incoming event.
7003 */
7004 if (mpt->m_in_reset) {
7005 NDBG20(("dropping event received prior to reset"));
7006 mutex_exit(&mpt->m_mutex);
7007 return;
7008 }
7009
7010 eventreply = (pMpi2EventNotificationReply_t)
7011 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7012 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7013
7014 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7015 &eventreply->IOCStatus)) {
7016 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7017 mptsas_log(mpt, CE_WARN,
7018 "!mptsas_handle_event: IOCStatus=0x%x, "
7019 "IOCLogInfo=0x%x", iocstatus,
7020 ddi_get32(mpt->m_acc_reply_frame_hdl,
7021 &eventreply->IOCLogInfo));
7022 } else {
7023 mptsas_log(mpt, CE_WARN,
7024 "mptsas_handle_event: IOCStatus=0x%x, "
7025 "IOCLogInfo=0x%x", iocstatus,
7026 ddi_get32(mpt->m_acc_reply_frame_hdl,
7027 &eventreply->IOCLogInfo));
7028 }
7029 }
7030
7031 /*
7032 * figure out what kind of event we got and handle accordingly
7033 */
7034 switch (event) {
7035 case MPI2_EVENT_LOG_ENTRY_ADDED:
7036 break;
7037 case MPI2_EVENT_LOG_DATA:
7038 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7039 &eventreply->IOCLogInfo);
7040 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7041 iocloginfo));
7042 break;
7043 case MPI2_EVENT_STATE_CHANGE:
7044 NDBG20(("mptsas%d state change.", mpt->m_instance));
7045 break;
7046 case MPI2_EVENT_HARD_RESET_RECEIVED:
7047 NDBG20(("mptsas%d event change.", mpt->m_instance));
7048 break;
7049 case MPI2_EVENT_SAS_DISCOVERY:
7050 {
7051 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7052 char string[80];
7053 uint8_t rc;
7054
7055 sasdiscovery =
7056 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7057
7058 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7059 &sasdiscovery->ReasonCode);
7060 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7061 &sasdiscovery->PhysicalPort);
7062 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7063 &sasdiscovery->DiscoveryStatus);
7064
7065 string[0] = 0;
7066 switch (rc) {
7067 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7068 (void) sprintf(string, "STARTING");
7069 break;
7070 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7071 (void) sprintf(string, "COMPLETED");
7072 break;
7073 default:
7074 (void) sprintf(string, "UNKNOWN");
7075 break;
7076 }
7077
7078 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7079 port, status));
7080
7081 break;
7082 }
7083 case MPI2_EVENT_EVENT_CHANGE:
7084 NDBG20(("mptsas%d event change.", mpt->m_instance));
7085 break;
7086 case MPI2_EVENT_TASK_SET_FULL:
7087 {
7088 pMpi2EventDataTaskSetFull_t taskfull;
7089
7090 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7091
7092 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7093 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7094 &taskfull->CurrentDepth)));
7095 break;
7096 }
7097 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7098 {
7099 /*
7100 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7101 * in mptsas_handle_event_sync() of interrupt context
7102 */
7103 break;
7104 }
7105 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7106 {
7107 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7108 uint8_t rc;
7109 char string[80];
7110
7111 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7112 eventreply->EventData;
7113
7114 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7115 &encstatus->ReasonCode);
7116 switch (rc) {
7117 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7118 (void) sprintf(string, "added");
7119 break;
7120 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7121 (void) sprintf(string, ", not responding");
7122 break;
7123 default:
7124 break;
7125 }
7126 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7127 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7128 &encstatus->EnclosureHandle), string));
7129 break;
7130 }
7131
7132 /*
7133 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7134 * mptsas_handle_event_sync,in here just send ack message.
7135 */
7136 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7137 {
7138 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7139 uint8_t rc;
7140 uint16_t devhdl;
7141 uint64_t wwn = 0;
7142 uint32_t wwn_lo, wwn_hi;
7143
7144 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7145 eventreply->EventData;
7146 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7147 &statuschange->ReasonCode);
7148 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7149 (uint32_t *)(void *)&statuschange->SASAddress);
7150 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7151 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7152 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7153 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7154 &statuschange->DevHandle);
7155
7156 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7157 wwn));
7158
7159 switch (rc) {
7160 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7161 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7162 ddi_get8(mpt->m_acc_reply_frame_hdl,
7163 &statuschange->ASC),
7164 ddi_get8(mpt->m_acc_reply_frame_hdl,
7165 &statuschange->ASCQ)));
7166 break;
7167
7168 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7169 NDBG20(("Device not supported"));
7170 break;
7171
7172 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7173 NDBG20(("IOC internally generated the Target Reset "
7174 "for devhdl:%x", devhdl));
7175 break;
7176
7177 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7178 NDBG20(("IOC's internally generated Target Reset "
7179 "completed for devhdl:%x", devhdl));
7180 break;
7181
7182 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7183 NDBG20(("IOC internally generated Abort Task"));
7184 break;
7185
7186 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7187 NDBG20(("IOC's internally generated Abort Task "
7188 "completed"));
7189 break;
7190
7191 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7192 NDBG20(("IOC internally generated Abort Task Set"));
7193 break;
7194
7195 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7196 NDBG20(("IOC internally generated Clear Task Set"));
7197 break;
7198
7199 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7200 NDBG20(("IOC internally generated Query Task"));
7201 break;
7202
7203 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7204 NDBG20(("Device sent an Asynchronous Notification"));
7205 break;
7206
7207 default:
7208 break;
7209 }
7210 break;
7211 }
7212 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7213 {
7214 /*
7215 * IR TOPOLOGY CHANGE LIST Event has already been handled
7216 * in mpt_handle_event_sync() of interrupt context
7217 */
7218 break;
7219 }
7220 case MPI2_EVENT_IR_OPERATION_STATUS:
7221 {
7222 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7223 char reason_str[80];
7224 uint8_t rc, percent;
7225 uint16_t handle;
7226
7227 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7228 eventreply->EventData;
7229 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7230 &irOpStatus->RAIDOperation);
7231 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7232 &irOpStatus->PercentComplete);
7233 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7234 &irOpStatus->VolDevHandle);
7235
7236 switch (rc) {
7237 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7238 (void) sprintf(reason_str, "resync");
7239 break;
7240 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7241 (void) sprintf(reason_str, "online capacity "
7242 "expansion");
7243 break;
7244 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7245 (void) sprintf(reason_str, "consistency check");
7246 break;
7247 default:
7248 (void) sprintf(reason_str, "unknown reason %x",
7249 rc);
7250 }
7251
7252 NDBG20(("mptsas%d raid operational status: (%s)"
7253 "\thandle(0x%04x), percent complete(%d)\n",
7254 mpt->m_instance, reason_str, handle, percent));
7255 break;
7256 }
7257 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7258 {
7259 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7260 uint8_t phy_num;
7261 uint8_t primitive;
7262
7263 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7264 eventreply->EventData;
7265
7266 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7267 &sas_broadcast->PhyNum);
7268 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7269 &sas_broadcast->Primitive);
7270
7271 switch (primitive) {
7272 case MPI2_EVENT_PRIMITIVE_CHANGE:
7273 mptsas_smhba_log_sysevent(mpt,
7274 ESC_SAS_HBA_PORT_BROADCAST,
7275 SAS_PORT_BROADCAST_CHANGE,
7276 &mpt->m_phy_info[phy_num].smhba_info);
7277 break;
7278 case MPI2_EVENT_PRIMITIVE_SES:
7279 mptsas_smhba_log_sysevent(mpt,
7280 ESC_SAS_HBA_PORT_BROADCAST,
7281 SAS_PORT_BROADCAST_SES,
7282 &mpt->m_phy_info[phy_num].smhba_info);
7283 break;
7284 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7285 mptsas_smhba_log_sysevent(mpt,
7286 ESC_SAS_HBA_PORT_BROADCAST,
7287 SAS_PORT_BROADCAST_D01_4,
7288 &mpt->m_phy_info[phy_num].smhba_info);
7289 break;
7290 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7291 mptsas_smhba_log_sysevent(mpt,
7292 ESC_SAS_HBA_PORT_BROADCAST,
7293 SAS_PORT_BROADCAST_D04_7,
7294 &mpt->m_phy_info[phy_num].smhba_info);
7295 break;
7296 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7297 mptsas_smhba_log_sysevent(mpt,
7298 ESC_SAS_HBA_PORT_BROADCAST,
7299 SAS_PORT_BROADCAST_D16_7,
7300 &mpt->m_phy_info[phy_num].smhba_info);
7301 break;
7302 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7303 mptsas_smhba_log_sysevent(mpt,
7304 ESC_SAS_HBA_PORT_BROADCAST,
7305 SAS_PORT_BROADCAST_D29_7,
7306 &mpt->m_phy_info[phy_num].smhba_info);
7307 break;
7308 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7309 mptsas_smhba_log_sysevent(mpt,
7310 ESC_SAS_HBA_PORT_BROADCAST,
7311 SAS_PORT_BROADCAST_D24_0,
7312 &mpt->m_phy_info[phy_num].smhba_info);
7313 break;
7314 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7315 mptsas_smhba_log_sysevent(mpt,
7316 ESC_SAS_HBA_PORT_BROADCAST,
7317 SAS_PORT_BROADCAST_D27_4,
7318 &mpt->m_phy_info[phy_num].smhba_info);
7319 break;
7320 default:
7321 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7322 " %x received",
7323 mpt->m_instance, primitive));
7324 break;
7325 }
7326 NDBG20(("mptsas%d sas broadcast primitive: "
7327 "\tprimitive(0x%04x), phy(%d) complete\n",
7328 mpt->m_instance, primitive, phy_num));
7329 break;
7330 }
7331 case MPI2_EVENT_IR_VOLUME:
7332 {
7333 Mpi2EventDataIrVolume_t *irVolume;
7334 uint16_t devhandle;
7335 uint32_t state;
7336 int config, vol;
7337 mptsas_slots_t *slots = mpt->m_active;
7338 uint8_t found = FALSE;
7339
7340 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7341 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7342 &irVolume->NewValue);
7343 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7344 &irVolume->VolDevHandle);
7345
7346 NDBG20(("EVENT_IR_VOLUME event is received"));
7347
7348 /*
7349 * Get latest RAID info and then find the DevHandle for this
7350 * event in the configuration. If the DevHandle is not found
7351 * just exit the event.
7352 */
7353 (void) mptsas_get_raid_info(mpt);
7354 for (config = 0; (config < slots->m_num_raid_configs) &&
7355 (!found); config++) {
7356 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7357 if (slots->m_raidconfig[config].m_raidvol[vol].
7358 m_raidhandle == devhandle) {
7359 found = TRUE;
7360 break;
7361 }
7362 }
7363 }
7364 if (!found) {
7365 break;
7366 }
7367
7368 switch (irVolume->ReasonCode) {
7369 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7370 {
7371 uint32_t i;
7372 slots->m_raidconfig[config].m_raidvol[vol].m_settings =
7373 state;
7374
7375 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7376 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7377 ", auto-config of hot-swap drives is %s"
7378 ", write caching is %s"
7379 ", hot-spare pool mask is %02x\n",
7380 vol, state &
7381 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7382 ? "disabled" : "enabled",
7383 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7384 ? "controlled by member disks" :
7385 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7386 ? "disabled" :
7387 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7388 ? "enabled" :
7389 "incorrectly set",
7390 (state >> 16) & 0xff);
7391 break;
7392 }
7393 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7394 {
7395 slots->m_raidconfig[config].m_raidvol[vol].m_state =
7396 (uint8_t)state;
7397
7398 mptsas_log(mpt, CE_NOTE,
7399 "Volume %d is now %s\n", vol,
7400 state == MPI2_RAID_VOL_STATE_OPTIMAL
7401 ? "optimal" :
7402 state == MPI2_RAID_VOL_STATE_DEGRADED
7403 ? "degraded" :
7404 state == MPI2_RAID_VOL_STATE_ONLINE
7405 ? "online" :
7406 state == MPI2_RAID_VOL_STATE_INITIALIZING
7407 ? "initializing" :
7408 state == MPI2_RAID_VOL_STATE_FAILED
7409 ? "failed" :
7410 state == MPI2_RAID_VOL_STATE_MISSING
7411 ? "missing" :
7412 "state unknown");
7413 break;
7414 }
7415 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7416 {
7417 slots->m_raidconfig[config].m_raidvol[vol].
7418 m_statusflags = state;
7419
7420 mptsas_log(mpt, CE_NOTE,
7421 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7422 vol,
7423 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7424 ? ", enabled" : ", disabled",
7425 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7426 ? ", quiesced" : "",
7427 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7428 ? ", inactive" : ", active",
7429 state &
7430 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7431 ? ", bad block table is full" : "",
7432 state &
7433 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7434 ? ", resync in progress" : "",
7435 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7436 ? ", background initialization in progress" : "",
7437 state &
7438 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7439 ? ", capacity expansion in progress" : "",
7440 state &
7441 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7442 ? ", consistency check in progress" : "",
7443 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7444 ? ", data scrub in progress" : "");
7445 break;
7446 }
7447 default:
7448 break;
7449 }
7450 break;
7451 }
7452 case MPI2_EVENT_IR_PHYSICAL_DISK:
7453 {
7454 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7455 uint16_t devhandle, enchandle, slot;
7456 uint32_t status, state;
7457 uint8_t physdisknum, reason;
7458
7459 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7460 eventreply->EventData;
7461 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7462 &irPhysDisk->PhysDiskNum);
7463 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7464 &irPhysDisk->PhysDiskDevHandle);
7465 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7466 &irPhysDisk->EnclosureHandle);
7467 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7468 &irPhysDisk->Slot);
7469 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7470 &irPhysDisk->NewValue);
7471 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7472 &irPhysDisk->ReasonCode);
7473
7474 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7475
7476 switch (reason) {
7477 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7478 mptsas_log(mpt, CE_NOTE,
7479 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7480 "for enclosure with handle 0x%x is now in hot "
7481 "spare pool %d",
7482 physdisknum, devhandle, slot, enchandle,
7483 (state >> 16) & 0xff);
7484 break;
7485
7486 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7487 status = state;
7488 mptsas_log(mpt, CE_NOTE,
7489 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7490 "for enclosure with handle 0x%x is now "
7491 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7492 enchandle,
7493 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7494 ? ", inactive" : ", active",
7495 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7496 ? ", out of sync" : "",
7497 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7498 ? ", quiesced" : "",
7499 status &
7500 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7501 ? ", write cache enabled" : "",
7502 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7503 ? ", capacity expansion target" : "");
7504 break;
7505
7506 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7507 mptsas_log(mpt, CE_NOTE,
7508 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7509 "for enclosure with handle 0x%x is now %s\n",
7510 physdisknum, devhandle, slot, enchandle,
7511 state == MPI2_RAID_PD_STATE_OPTIMAL
7512 ? "optimal" :
7513 state == MPI2_RAID_PD_STATE_REBUILDING
7514 ? "rebuilding" :
7515 state == MPI2_RAID_PD_STATE_DEGRADED
7516 ? "degraded" :
7517 state == MPI2_RAID_PD_STATE_HOT_SPARE
7518 ? "a hot spare" :
7519 state == MPI2_RAID_PD_STATE_ONLINE
7520 ? "online" :
7521 state == MPI2_RAID_PD_STATE_OFFLINE
7522 ? "offline" :
7523 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7524 ? "not compatible" :
7525 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7526 ? "not configured" :
7527 "state unknown");
7528 break;
7529 }
7530 break;
7531 }
7532 default:
7533 NDBG20(("mptsas%d: unknown event %x received",
7534 mpt->m_instance, event));
7535 break;
7536 }
7537
7538 /*
7539 * Return the reply frame to the free queue.
7540 */
7541 ddi_put32(mpt->m_acc_free_queue_hdl,
7542 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7543 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7544 DDI_DMA_SYNC_FORDEV);
7545 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7546 mpt->m_free_index = 0;
7547 }
7548 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7549 mpt->m_free_index);
7550 mutex_exit(&mpt->m_mutex);
7551 }
7552
7553 /*
7554 * invoked from timeout() to restart qfull cmds with throttle == 0
7555 */
7556 static void
7557 mptsas_restart_cmd(void *arg)
7558 {
7559 mptsas_t *mpt = arg;
7560 mptsas_target_t *ptgt = NULL;
7561
7562 mutex_enter(&mpt->m_mutex);
7563
7564 mpt->m_restart_cmd_timeid = 0;
7565
7566 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7567 MPTSAS_HASH_FIRST);
7568 while (ptgt != NULL) {
7569 if (ptgt->m_reset_delay == 0) {
7570 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7571 mptsas_set_throttle(mpt, ptgt,
7572 MAX_THROTTLE);
7573 }
7574 }
7575
7576 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7577 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7578 }
7579 mptsas_restart_hba(mpt);
7580 mutex_exit(&mpt->m_mutex);
7581 }
7582
7583 void
7584 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7585 {
7586 int slot;
7587 mptsas_slots_t *slots = mpt->m_active;
7588 int t;
7589 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
7590
7591 ASSERT(cmd != NULL);
7592 ASSERT(cmd->cmd_queued == FALSE);
7593
7594 /*
7595 * Task Management cmds are removed in their own routines. Also,
7596 * we don't want to modify timeout based on TM cmds.
7597 */
7598 if (cmd->cmd_flags & CFLAG_TM_CMD) {
7599 return;
7600 }
7601
7602 t = Tgt(cmd);
7603 slot = cmd->cmd_slot;
7604
7605 /*
7606 * remove the cmd.
7607 */
7608 if (cmd == slots->m_slot[slot]) {
7609 NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));
7610 slots->m_slot[slot] = NULL;
7611 mpt->m_ncmds--;
7612
7613 /*
7614 * only decrement per target ncmds if command
7615 * has a target associated with it.
7616 */
7617 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
7618 ptgt->m_t_ncmds--;
7619 /*
7620 * reset throttle if we just ran an untagged command
7621 * to a tagged target
7622 */
7623 if ((ptgt->m_t_ncmds == 0) &&
7624 ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
7625 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7626 }
7627 }
7628
7629 }
7630
7631 /*
7632 * This is all we need to do for ioc commands.
7633 */
7634 if (cmd->cmd_flags & CFLAG_CMDIOC) {
7635 mptsas_return_to_pool(mpt, cmd);
7636 return;
7637 }
7638
7639 /*
7640 * Figure out what to set tag Q timeout for...
7641 *
7642 * Optimize: If we have duplicate's of same timeout
7643 * we're using, then we'll use it again until we run
7644 * out of duplicates. This should be the normal case
7645 * for block and raw I/O.
7646 * If no duplicates, we have to scan through tag que and
7647 * find the longest timeout value and use it. This is
7648 * going to take a while...
7649 * Add 1 to m_n_slots to account for TM request.
7650 */
7651 if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
7652 if (--(ptgt->m_dups) == 0) {
7653 if (ptgt->m_t_ncmds) {
7654 mptsas_cmd_t *ssp;
7655 uint_t n = 0;
7656 ushort_t nslots = (slots->m_n_slots + 1);
7657 ushort_t i;
7658 /*
7659 * This crude check assumes we don't do
7660 * this too often which seems reasonable
7661 * for block and raw I/O.
7662 */
7663 for (i = 0; i < nslots; i++) {
7664 ssp = slots->m_slot[i];
7665 if (ssp && (Tgt(ssp) == t) &&
7666 (ssp->cmd_pkt->pkt_time > n)) {
7667 n = ssp->cmd_pkt->pkt_time;
7668 ptgt->m_dups = 1;
7669 } else if (ssp && (Tgt(ssp) == t) &&
7670 (ssp->cmd_pkt->pkt_time == n)) {
7671 ptgt->m_dups++;
7672 }
7673 }
7674 ptgt->m_timebase = n;
7675 } else {
7676 ptgt->m_dups = 0;
7677 ptgt->m_timebase = 0;
7678 }
7679 }
7680 }
7681 ptgt->m_timeout = ptgt->m_timebase;
7682
7683 ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
7684 }
7685
7686 /*
7687 * accept all cmds on the tx_waitq if any and then
7688 * start a fresh request from the top of the device queue.
7689 *
7690 * since there are always cmds queued on the tx_waitq, and rare cmds on
7691 * the instance waitq, so this function should not be invoked in the ISR,
7692 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
7693 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
7694 */
7695 static void
7696 mptsas_restart_hba(mptsas_t *mpt)
7697 {
7698 ASSERT(mutex_owned(&mpt->m_mutex));
7699
7700 mutex_enter(&mpt->m_tx_waitq_mutex);
7701 if (mpt->m_tx_waitq) {
7702 mptsas_accept_tx_waitq(mpt);
7703 }
7704 mutex_exit(&mpt->m_tx_waitq_mutex);
7705 mptsas_restart_waitq(mpt);
7706 }
7707
7708 /*
7709 * start a fresh request from the top of the device queue
7710 */
7711 static void
7712 mptsas_restart_waitq(mptsas_t *mpt)
7713 {
7714 mptsas_cmd_t *cmd, *next_cmd;
7715 mptsas_target_t *ptgt = NULL;
7716
7717 NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
7718
7719 ASSERT(mutex_owned(&mpt->m_mutex));
7720
7721 /*
7722 * If there is a reset delay, don't start any cmds. Otherwise, start
7723 * as many cmds as possible.
7724 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
7725 * commands is m_max_requests - 2.
7726 */
7727 cmd = mpt->m_waitq;
7728
7729 while (cmd != NULL) {
7730 next_cmd = cmd->cmd_linkp;
7731 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
7732 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7733 /*
7734 * passthru command get slot need
7735 * set CFLAG_PREPARED.
7736 */
7737 cmd->cmd_flags |= CFLAG_PREPARED;
7738 mptsas_waitq_delete(mpt, cmd);
7739 mptsas_start_passthru(mpt, cmd);
7740 }
7741 cmd = next_cmd;
7742 continue;
7743 }
7744 if (cmd->cmd_flags & CFLAG_CONFIG) {
7745 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7746 /*
7747 * Send the config page request and delete it
7748 * from the waitq.
7749 */
7750 cmd->cmd_flags |= CFLAG_PREPARED;
7751 mptsas_waitq_delete(mpt, cmd);
7752 mptsas_start_config_page_access(mpt, cmd);
7753 }
7754 cmd = next_cmd;
7755 continue;
7756 }
7757 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
7758 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7759 /*
7760 * Send the FW Diag request and delete if from
7761 * the waitq.
7762 */
7763 cmd->cmd_flags |= CFLAG_PREPARED;
7764 mptsas_waitq_delete(mpt, cmd);
7765 mptsas_start_diag(mpt, cmd);
7766 }
7767 cmd = next_cmd;
7768 continue;
7769 }
7770
7771 ptgt = cmd->cmd_tgt_addr;
7772 if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
7773 (ptgt->m_t_ncmds == 0)) {
7774 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7775 }
7776 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
7777 (ptgt && (ptgt->m_reset_delay == 0)) &&
7778 (ptgt && (ptgt->m_t_ncmds <
7779 ptgt->m_t_throttle))) {
7780 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7781 mptsas_waitq_delete(mpt, cmd);
7782 (void) mptsas_start_cmd(mpt, cmd);
7783 }
7784 }
7785 cmd = next_cmd;
7786 }
7787 }
7788 /*
7789 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
7790 * Accept all those queued cmds before new cmd is accept so that the
7791 * cmds are sent in order.
7792 */
7793 static void
7794 mptsas_accept_tx_waitq(mptsas_t *mpt)
7795 {
7796 mptsas_cmd_t *cmd;
7797
7798 ASSERT(mutex_owned(&mpt->m_mutex));
7799 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
7800
7801 /*
7802 * A Bus Reset could occur at any time and flush the tx_waitq,
7803 * so we cannot count on the tx_waitq to contain even one cmd.
7804 * And when the m_tx_waitq_mutex is released and run
7805 * mptsas_accept_pkt(), the tx_waitq may be flushed.
7806 */
7807 cmd = mpt->m_tx_waitq;
7808 for (;;) {
7809 if ((cmd = mpt->m_tx_waitq) == NULL) {
7810 mpt->m_tx_draining = 0;
7811 break;
7812 }
7813 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
7814 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
7815 }
7816 cmd->cmd_linkp = NULL;
7817 mutex_exit(&mpt->m_tx_waitq_mutex);
7818 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
7819 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
7820 "to accept cmd on queue\n");
7821 mutex_enter(&mpt->m_tx_waitq_mutex);
7822 }
7823 }
7824
7825
7826 /*
7827 * mpt tag type lookup
7828 */
7829 static char mptsas_tag_lookup[] =
7830 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
7831
/*
 * Fill in and post the MPI2 SCSI IO request frame for a command that
 * already owns a request slot, then update the per-target timeout
 * bookkeeping (m_timebase/m_timeout/m_dups) consumed by the watchdog.
 *
 * Returns DDI_SUCCESS once the request descriptor has been written to
 * the IOC, or DDI_FAILURE when the command was requeued (to drain an
 * untagged command submitted to an active tagged target) or an FMA
 * handle check failed after posting.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	int			n;
	caddr_t			mem;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint32_t		request_desc_low, request_desc_high;

	NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));

	/*
	 * The command's pre-assigned slot number is used directly as
	 * the SMID.  0 is an invalid SMID, so slot numbering starts
	 * at 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/* Give the slot back and requeue at the head. */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		/*
		 * Untagged command: throttle the target down to one
		 * outstanding command, except for REQUEST SENSE.
		 */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	/* Locate this slot's request frame and build the IO request. */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;

	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	/* Copy the CDB into the request frame. */
	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* No data to move: emit a single zero-length simple SGE. */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_ext_arqcookie.dmac_address);
	} else {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_arqcookie.dmac_address);
	}

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=0x%p, with cmd=0x%p",
	    (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));

	/* Flush the frame to memory before telling the IOC about it. */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	request_desc_high = ptgt->m_devhdl << 16;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);

	/*
	 * Start timeout.
	 */
#ifdef MPTSAS_TEST
	/*
	 * Temporarily set timebase = 0;  needed for
	 * timeout torture test.
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 0;
	}
#endif
	/*
	 * n > 0: this pkt's timeout becomes the new (longest) timebase;
	 * n == 0: one more duplicate of the current timebase;
	 * n < 0: shorter timeout, current timebase still governs.
	 */
	n = pkt->pkt_time - ptgt->m_timebase;

	if (n == 0) {
		(ptgt->m_dups)++;
		ptgt->m_timeout = ptgt->m_timebase;
	} else if (n > 0) {
		ptgt->m_timeout =
		    ptgt->m_timebase = pkt->pkt_time;
		ptgt->m_dups = 1;
	} else if (n < 0) {
		ptgt->m_timeout = ptgt->m_timebase;
	}
#ifdef MPTSAS_TEST
	/*
	 * Set back to a number higher than
	 * mptsas_scsi_watchdog_tick
	 * so timeouts will happen in mptsas_watchsubr
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 60;
	}
#endif

	/* Post-submit FMA check on the request frame handles. */
	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
8013
8014 /*
8015 * Select a helper thread to handle current doneq
8016 */
8017 static void
8018 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8019 {
8020 uint64_t t, i;
8021 uint32_t min = 0xffffffff;
8022 mptsas_doneq_thread_list_t *item;
8023
8024 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8025 item = &mpt->m_doneq_thread_id[i];
8026 /*
8027 * If the completed command on help thread[i] less than
8028 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8029 * pick a thread which has least completed command.
8030 */
8031
8032 mutex_enter(&item->mutex);
8033 if (item->len < mpt->m_doneq_thread_threshold) {
8034 t = i;
8035 mutex_exit(&item->mutex);
8036 break;
8037 }
8038 if (item->len < min) {
8039 min = item->len;
8040 t = i;
8041 }
8042 mutex_exit(&item->mutex);
8043 }
8044 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8045 mptsas_doneq_mv(mpt, t);
8046 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8047 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8048 }
8049
8050 /*
8051 * move the current global doneq to the doneq of thead[t]
8052 */
8053 static void
8054 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8055 {
8056 mptsas_cmd_t *cmd;
8057 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8058
8059 ASSERT(mutex_owned(&item->mutex));
8060 while ((cmd = mpt->m_doneq) != NULL) {
8061 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8062 mpt->m_donetail = &mpt->m_doneq;
8063 }
8064 cmd->cmd_linkp = NULL;
8065 *item->donetail = cmd;
8066 item->donetail = &cmd->cmd_linkp;
8067 mpt->m_doneq_len--;
8068 item->len++;
8069 }
8070 }
8071
8072 void
8073 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8074 {
8075 struct scsi_pkt *pkt = CMD2PKT(cmd);
8076
8077 /* Check all acc and dma handles */
8078 if ((mptsas_check_acc_handle(mpt->m_datap) !=
8079 DDI_SUCCESS) ||
8080 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8081 DDI_SUCCESS) ||
8082 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8083 DDI_SUCCESS) ||
8084 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8085 DDI_SUCCESS) ||
8086 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8087 DDI_SUCCESS) ||
8088 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8089 DDI_SUCCESS) ||
8090 (mptsas_check_acc_handle(mpt->m_config_handle) !=
8091 DDI_SUCCESS)) {
8092 ddi_fm_service_impact(mpt->m_dip,
8093 DDI_SERVICE_UNAFFECTED);
8094 ddi_fm_acc_err_clear(mpt->m_config_handle,
8095 DDI_FME_VER0);
8096 pkt->pkt_reason = CMD_TRAN_ERR;
8097 pkt->pkt_statistics = 0;
8098 }
8099 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
8100 DDI_SUCCESS) ||
8101 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
8102 DDI_SUCCESS) ||
8103 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
8104 DDI_SUCCESS) ||
8105 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
8106 DDI_SUCCESS) ||
8107 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
8108 DDI_SUCCESS)) {
8109 ddi_fm_service_impact(mpt->m_dip,
8110 DDI_SERVICE_UNAFFECTED);
8111 pkt->pkt_reason = CMD_TRAN_ERR;
8112 pkt->pkt_statistics = 0;
8113 }
8114 if (cmd->cmd_dmahandle &&
8115 (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
8116 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8117 pkt->pkt_reason = CMD_TRAN_ERR;
8118 pkt->pkt_statistics = 0;
8119 }
8120 if ((cmd->cmd_extra_frames &&
8121 ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
8122 DDI_SUCCESS) ||
8123 (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
8124 DDI_SUCCESS)))) {
8125 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8126 pkt->pkt_reason = CMD_TRAN_ERR;
8127 pkt->pkt_statistics = 0;
8128 }
8129 if (cmd->cmd_arqhandle &&
8130 (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
8131 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8132 pkt->pkt_reason = CMD_TRAN_ERR;
8133 pkt->pkt_statistics = 0;
8134 }
8135 if (cmd->cmd_ext_arqhandle &&
8136 (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
8137 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8138 pkt->pkt_reason = CMD_TRAN_ERR;
8139 pkt->pkt_statistics = 0;
8140 }
8141 }
8142
8143 /*
8144 * These routines manipulate the queue of commands that
8145 * are waiting for their completion routines to be called.
8146 * The queue is usually in FIFO order but on an MP system
8147 * it's possible for the completion routines to get out
8148 * of order. If that's a problem you need to add a global
8149 * mutex around the code that calls the completion routine
8150 * in the interrupt handler.
8151 */
8152 static void
8153 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8154 {
8155 struct scsi_pkt *pkt = CMD2PKT(cmd);
8156
8157 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8158
8159 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8160 cmd->cmd_linkp = NULL;
8161 cmd->cmd_flags |= CFLAG_FINISHED;
8162 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8163
8164 mptsas_fma_check(mpt, cmd);
8165
8166 /*
8167 * only add scsi pkts that have completion routines to
8168 * the doneq. no intr cmds do not have callbacks.
8169 */
8170 if (pkt && (pkt->pkt_comp)) {
8171 *mpt->m_donetail = cmd;
8172 mpt->m_donetail = &cmd->cmd_linkp;
8173 mpt->m_doneq_len++;
8174 }
8175 }
8176
8177 static mptsas_cmd_t *
8178 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8179 {
8180 mptsas_cmd_t *cmd;
8181 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8182
8183 /* pop one off the done queue */
8184 if ((cmd = item->doneq) != NULL) {
8185 /* if the queue is now empty fix the tail pointer */
8186 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8187 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8188 item->donetail = &item->doneq;
8189 }
8190 cmd->cmd_linkp = NULL;
8191 item->len--;
8192 }
8193 return (cmd);
8194 }
8195
8196 static void
8197 mptsas_doneq_empty(mptsas_t *mpt)
8198 {
8199 if (mpt->m_doneq && !mpt->m_in_callback) {
8200 mptsas_cmd_t *cmd, *next;
8201 struct scsi_pkt *pkt;
8202
8203 mpt->m_in_callback = 1;
8204 cmd = mpt->m_doneq;
8205 mpt->m_doneq = NULL;
8206 mpt->m_donetail = &mpt->m_doneq;
8207 mpt->m_doneq_len = 0;
8208
8209 mutex_exit(&mpt->m_mutex);
8210 /*
8211 * run the completion routines of all the
8212 * completed commands
8213 */
8214 while (cmd != NULL) {
8215 next = cmd->cmd_linkp;
8216 cmd->cmd_linkp = NULL;
8217 /* run this command's completion routine */
8218 cmd->cmd_flags |= CFLAG_COMPLETED;
8219 pkt = CMD2PKT(cmd);
8220 mptsas_pkt_comp(pkt, cmd);
8221 cmd = next;
8222 }
8223 mutex_enter(&mpt->m_mutex);
8224 mpt->m_in_callback = 0;
8225 }
8226 }
8227
8228 /*
8229 * These routines manipulate the target's queue of pending requests
8230 */
8231 void
8232 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8233 {
8234 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8235 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8236 cmd->cmd_queued = TRUE;
8237 if (ptgt)
8238 ptgt->m_t_nwait++;
8239 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8240 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8241 mpt->m_waitqtail = &cmd->cmd_linkp;
8242 }
8243 mpt->m_waitq = cmd;
8244 } else {
8245 cmd->cmd_linkp = NULL;
8246 *(mpt->m_waitqtail) = cmd;
8247 mpt->m_waitqtail = &cmd->cmd_linkp;
8248 }
8249 }
8250
8251 static mptsas_cmd_t *
8252 mptsas_waitq_rm(mptsas_t *mpt)
8253 {
8254 mptsas_cmd_t *cmd;
8255 mptsas_target_t *ptgt;
8256 NDBG7(("mptsas_waitq_rm"));
8257
8258 MPTSAS_WAITQ_RM(mpt, cmd);
8259
8260 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8261 if (cmd) {
8262 ptgt = cmd->cmd_tgt_addr;
8263 if (ptgt) {
8264 ptgt->m_t_nwait--;
8265 ASSERT(ptgt->m_t_nwait >= 0);
8266 }
8267 }
8268 return (cmd);
8269 }
8270
8271 /*
8272 * remove specified cmd from the middle of the wait queue.
8273 */
8274 static void
8275 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8276 {
8277 mptsas_cmd_t *prevp = mpt->m_waitq;
8278 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8279
8280 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8281 (void *)mpt, (void *)cmd));
8282 if (ptgt) {
8283 ptgt->m_t_nwait--;
8284 ASSERT(ptgt->m_t_nwait >= 0);
8285 }
8286
8287 if (prevp == cmd) {
8288 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8289 mpt->m_waitqtail = &mpt->m_waitq;
8290
8291 cmd->cmd_linkp = NULL;
8292 cmd->cmd_queued = FALSE;
8293 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8294 (void *)mpt, (void *)cmd));
8295 return;
8296 }
8297
8298 while (prevp != NULL) {
8299 if (prevp->cmd_linkp == cmd) {
8300 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8301 mpt->m_waitqtail = &prevp->cmd_linkp;
8302
8303 cmd->cmd_linkp = NULL;
8304 cmd->cmd_queued = FALSE;
8305 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8306 (void *)mpt, (void *)cmd));
8307 return;
8308 }
8309 prevp = prevp->cmd_linkp;
8310 }
8311 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8312 }
8313
/*
 * Pop the head command, if any, off the tx_waitq.
 *
 * NOTE(review): the queue manipulation lives entirely inside the
 * MPTSAS_TX_WAITQ_RM macro (defined elsewhere); it presumably leaves
 * cmd NULL when the queue is empty — confirm against the macro.
 */
static mptsas_cmd_t *
mptsas_tx_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t *cmd;
	NDBG7(("mptsas_tx_waitq_rm"));

	MPTSAS_TX_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));

	return (cmd);
}
8326
8327 /*
8328 * remove specified cmd from the middle of the tx_waitq.
8329 */
8330 static void
8331 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8332 {
8333 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8334
8335 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8336 (void *)mpt, (void *)cmd));
8337
8338 if (prevp == cmd) {
8339 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8340 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8341
8342 cmd->cmd_linkp = NULL;
8343 cmd->cmd_queued = FALSE;
8344 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8345 (void *)mpt, (void *)cmd));
8346 return;
8347 }
8348
8349 while (prevp != NULL) {
8350 if (prevp->cmd_linkp == cmd) {
8351 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8352 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8353
8354 cmd->cmd_linkp = NULL;
8355 cmd->cmd_queued = FALSE;
8356 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8357 (void *)mpt, (void *)cmd));
8358 return;
8359 }
8360 prevp = prevp->cmd_linkp;
8361 }
8362 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8363 }
8364
8365 /*
8366 * device and bus reset handling
8367 *
8368 * Notes:
8369 * - RESET_ALL: reset the controller
8370 * - RESET_TARGET: reset the target specified in scsi_address
8371 */
8372 static int
8373 mptsas_scsi_reset(struct scsi_address *ap, int level)
8374 {
8375 mptsas_t *mpt = ADDR2MPT(ap);
8376 int rval;
8377 mptsas_tgt_private_t *tgt_private;
8378 mptsas_target_t *ptgt = NULL;
8379
8380 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8381 ptgt = tgt_private->t_private;
8382 if (ptgt == NULL) {
8383 return (FALSE);
8384 }
8385 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
8386 level));
8387
8388 mutex_enter(&mpt->m_mutex);
8389 /*
8390 * if we are not in panic set up a reset delay for this target
8391 */
8392 if (!ddi_in_panic()) {
8393 mptsas_setup_bus_reset_delay(mpt);
8394 } else {
8395 drv_usecwait(mpt->m_scsi_reset_delay * 1000);
8396 }
8397 rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
8398 mutex_exit(&mpt->m_mutex);
8399
8400 /*
8401 * The transport layer expect to only see TRUE and
8402 * FALSE. Therefore, we will adjust the return value
8403 * if mptsas_do_scsi_reset returns FAILED.
8404 */
8405 if (rval == FAILED)
8406 rval = FALSE;
8407 return (rval);
8408 }
8409
8410 static int
8411 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8412 {
8413 int rval = FALSE;
8414 uint8_t config, disk;
8415 mptsas_slots_t *slots = mpt->m_active;
8416
8417 ASSERT(mutex_owned(&mpt->m_mutex));
8418
8419 if (mptsas_debug_resets) {
8420 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8421 devhdl);
8422 }
8423
8424 /*
8425 * Issue a Target Reset message to the target specified but not to a
8426 * disk making up a raid volume. Just look through the RAID config
8427 * Phys Disk list of DevHandles. If the target's DevHandle is in this
8428 * list, then don't reset this target.
8429 */
8430 for (config = 0; config < slots->m_num_raid_configs; config++) {
8431 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
8432 if (devhdl == slots->m_raidconfig[config].
8433 m_physdisk_devhdl[disk]) {
8434 return (TRUE);
8435 }
8436 }
8437 }
8438
8439 rval = mptsas_ioc_task_management(mpt,
8440 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
8441
8442 mptsas_doneq_empty(mpt);
8443 return (rval);
8444 }
8445
8446 static int
8447 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
8448 void (*callback)(caddr_t), caddr_t arg)
8449 {
8450 mptsas_t *mpt = ADDR2MPT(ap);
8451
8452 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
8453
8454 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
8455 &mpt->m_mutex, &mpt->m_reset_notify_listf));
8456 }
8457
8458 static int
8459 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8460 {
8461 dev_info_t *lun_dip = NULL;
8462
8463 ASSERT(sd != NULL);
8464 ASSERT(name != NULL);
8465 lun_dip = sd->sd_dev;
8466 ASSERT(lun_dip != NULL);
8467
8468 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8469 return (1);
8470 } else {
8471 return (0);
8472 }
8473 }
8474
/*
 * The bus address uses the same formatting as the device name, so
 * simply delegate to mptsas_get_name().
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
8480
8481 void
8482 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
8483 {
8484
8485 NDBG25(("mptsas_set_throttle: throttle=%x", what));
8486
8487 /*
8488 * if the bus is draining/quiesced, no changes to the throttles
8489 * are allowed. Not allowing change of throttles during draining
8490 * limits error recovery but will reduce draining time
8491 *
8492 * all throttles should have been set to HOLD_THROTTLE
8493 */
8494 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
8495 return;
8496 }
8497
8498 if (what == HOLD_THROTTLE) {
8499 ptgt->m_t_throttle = HOLD_THROTTLE;
8500 } else if (ptgt->m_reset_delay == 0) {
8501 ptgt->m_t_throttle = what;
8502 }
8503 }
8504
8505 /*
8506 * Clean up from a device reset.
8507 * For the case of target reset, this function clears the waitq of all
8508 * commands for a particular target. For the case of abort task set, this
8509 * function clears the waitq of all commonds for a particular target/lun.
8510 */
8511 static void
8512 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
8513 {
8514 mptsas_slots_t *slots = mpt->m_active;
8515 mptsas_cmd_t *cmd, *next_cmd;
8516 int slot;
8517 uchar_t reason;
8518 uint_t stat;
8519
8520 NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
8521
8522 /*
8523 * Make sure the I/O Controller has flushed all cmds
8524 * that are associated with this target for a target reset
8525 * and target/lun for abort task set.
8526 * Account for TM requests, which use the last SMID.
8527 */
8528 for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
8529 if ((cmd = slots->m_slot[slot]) == NULL)
8530 continue;
8531 reason = CMD_RESET;
8532 stat = STAT_DEV_RESET;
8533 switch (tasktype) {
8534 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8535 if (Tgt(cmd) == target) {
8536 if (cmd->cmd_tgt_addr->m_timeout < 0) {
8537 /*
8538 * When timeout requested, propagate
8539 * proper reason and statistics to
8540 * target drivers.
8541 */
8542 reason = CMD_TIMEOUT;
8543 stat |= STAT_TIMEOUT;
8544 }
8545 NDBG25(("mptsas_flush_target discovered non-"
8546 "NULL cmd in slot %d, tasktype 0x%x", slot,
8547 tasktype));
8548 mptsas_dump_cmd(mpt, cmd);
8549 mptsas_remove_cmd(mpt, cmd);
8550 mptsas_set_pkt_reason(mpt, cmd, reason, stat);
8551 mptsas_doneq_add(mpt, cmd);
8552 }
8553 break;
8554 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8555 reason = CMD_ABORTED;
8556 stat = STAT_ABORTED;
8557 /*FALLTHROUGH*/
8558 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8559 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8560
8561 NDBG25(("mptsas_flush_target discovered non-"
8562 "NULL cmd in slot %d, tasktype 0x%x", slot,
8563 tasktype));
8564 mptsas_dump_cmd(mpt, cmd);
8565 mptsas_remove_cmd(mpt, cmd);
8566 mptsas_set_pkt_reason(mpt, cmd, reason,
8567 stat);
8568 mptsas_doneq_add(mpt, cmd);
8569 }
8570 break;
8571 default:
8572 break;
8573 }
8574 }
8575
8576 /*
8577 * Flush the waitq and tx_waitq of this target's cmds
8578 */
8579 cmd = mpt->m_waitq;
8580
8581 reason = CMD_RESET;
8582 stat = STAT_DEV_RESET;
8583
8584 switch (tasktype) {
8585 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
8586 while (cmd != NULL) {
8587 next_cmd = cmd->cmd_linkp;
8588 if (Tgt(cmd) == target) {
8589 mptsas_waitq_delete(mpt, cmd);
8590 mptsas_set_pkt_reason(mpt, cmd,
8591 reason, stat);
8592 mptsas_doneq_add(mpt, cmd);
8593 }
8594 cmd = next_cmd;
8595 }
8596 mutex_enter(&mpt->m_tx_waitq_mutex);
8597 cmd = mpt->m_tx_waitq;
8598 while (cmd != NULL) {
8599 next_cmd = cmd->cmd_linkp;
8600 if (Tgt(cmd) == target) {
8601 mptsas_tx_waitq_delete(mpt, cmd);
8602 mutex_exit(&mpt->m_tx_waitq_mutex);
8603 mptsas_set_pkt_reason(mpt, cmd,
8604 reason, stat);
8605 mptsas_doneq_add(mpt, cmd);
8606 mutex_enter(&mpt->m_tx_waitq_mutex);
8607 }
8608 cmd = next_cmd;
8609 }
8610 mutex_exit(&mpt->m_tx_waitq_mutex);
8611 break;
8612 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
8613 reason = CMD_ABORTED;
8614 stat = STAT_ABORTED;
8615 /*FALLTHROUGH*/
8616 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
8617 while (cmd != NULL) {
8618 next_cmd = cmd->cmd_linkp;
8619 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8620 mptsas_waitq_delete(mpt, cmd);
8621 mptsas_set_pkt_reason(mpt, cmd,
8622 reason, stat);
8623 mptsas_doneq_add(mpt, cmd);
8624 }
8625 cmd = next_cmd;
8626 }
8627 mutex_enter(&mpt->m_tx_waitq_mutex);
8628 cmd = mpt->m_tx_waitq;
8629 while (cmd != NULL) {
8630 next_cmd = cmd->cmd_linkp;
8631 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8632 mptsas_tx_waitq_delete(mpt, cmd);
8633 mutex_exit(&mpt->m_tx_waitq_mutex);
8634 mptsas_set_pkt_reason(mpt, cmd,
8635 reason, stat);
8636 mptsas_doneq_add(mpt, cmd);
8637 mutex_enter(&mpt->m_tx_waitq_mutex);
8638 }
8639 cmd = next_cmd;
8640 }
8641 mutex_exit(&mpt->m_tx_waitq_mutex);
8642 break;
8643 default:
8644 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
8645 tasktype);
8646 break;
8647 }
8648 }
8649
8650 /*
8651 * Clean up hba state, abort all outstanding command and commands in waitq
8652 * reset timeout of all targets.
8653 */
8654 static void
8655 mptsas_flush_hba(mptsas_t *mpt)
8656 {
8657 mptsas_slots_t *slots = mpt->m_active;
8658 mptsas_cmd_t *cmd;
8659 int slot;
8660
8661 NDBG25(("mptsas_flush_hba"));
8662
8663 /*
8664 * The I/O Controller should have already sent back
8665 * all commands via the scsi I/O reply frame. Make
8666 * sure all commands have been flushed.
8667 * Account for TM request, which use the last SMID.
8668 */
8669 for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
8670 if ((cmd = slots->m_slot[slot]) == NULL)
8671 continue;
8672
8673 if (cmd->cmd_flags & CFLAG_CMDIOC) {
8674 /*
8675 * Need to make sure to tell everyone that might be
8676 * waiting on this command that it's going to fail. If
8677 * we get here, this command will never timeout because
8678 * the active command table is going to be re-allocated,
8679 * so there will be nothing to check against a time out.
8680 * Instead, mark the command as failed due to reset.
8681 */
8682 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
8683 STAT_BUS_RESET);
8684 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
8685 (cmd->cmd_flags & CFLAG_CONFIG) ||
8686 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
8687 cmd->cmd_flags |= CFLAG_FINISHED;
8688 cv_broadcast(&mpt->m_passthru_cv);
8689 cv_broadcast(&mpt->m_config_cv);
8690 cv_broadcast(&mpt->m_fw_diag_cv);
8691 }
8692 continue;
8693 }
8694
8695 NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
8696 slot));
8697 mptsas_dump_cmd(mpt, cmd);
8698
8699 mptsas_remove_cmd(mpt, cmd);
8700 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8701 mptsas_doneq_add(mpt, cmd);
8702 }
8703
8704 /*
8705 * Flush the waitq.
8706 */
8707 while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
8708 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8709 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
8710 (cmd->cmd_flags & CFLAG_CONFIG) ||
8711 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
8712 cmd->cmd_flags |= CFLAG_FINISHED;
8713 cv_broadcast(&mpt->m_passthru_cv);
8714 cv_broadcast(&mpt->m_config_cv);
8715 cv_broadcast(&mpt->m_fw_diag_cv);
8716 } else {
8717 mptsas_doneq_add(mpt, cmd);
8718 }
8719 }
8720
8721 /*
8722 * Flush the tx_waitq
8723 */
8724 mutex_enter(&mpt->m_tx_waitq_mutex);
8725 while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
8726 mutex_exit(&mpt->m_tx_waitq_mutex);
8727 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8728 mptsas_doneq_add(mpt, cmd);
8729 mutex_enter(&mpt->m_tx_waitq_mutex);
8730 }
8731 mutex_exit(&mpt->m_tx_waitq_mutex);
8732
8733 /*
8734 * Drain the taskqs prior to reallocating resources.
8735 */
8736 mutex_exit(&mpt->m_mutex);
8737 ddi_taskq_wait(mpt->m_event_taskq);
8738 ddi_taskq_wait(mpt->m_dr_taskq);
8739 mutex_enter(&mpt->m_mutex);
8740 }
8741
8742 /*
8743 * set pkt_reason and OR in pkt_statistics flag
8744 */
8745 static void
8746 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
8747 uint_t stat)
8748 {
8749 #ifndef __lock_lint
8750 _NOTE(ARGUNUSED(mpt))
8751 #endif
8752
8753 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
8754 (void *)cmd, reason, stat));
8755
8756 if (cmd) {
8757 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
8758 cmd->cmd_pkt->pkt_reason = reason;
8759 }
8760 cmd->cmd_pkt->pkt_statistics |= stat;
8761 }
8762 }
8763
8764 static void
8765 mptsas_start_watch_reset_delay()
8766 {
8767 NDBG22(("mptsas_start_watch_reset_delay"));
8768
8769 mutex_enter(&mptsas_global_mutex);
8770 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
8771 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
8772 drv_usectohz((clock_t)
8773 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
8774 ASSERT(mptsas_reset_watch != NULL);
8775 }
8776 mutex_exit(&mptsas_global_mutex);
8777 }
8778
8779 static void
8780 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
8781 {
8782 mptsas_target_t *ptgt = NULL;
8783
8784 NDBG22(("mptsas_setup_bus_reset_delay"));
8785 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
8786 MPTSAS_HASH_FIRST);
8787 while (ptgt != NULL) {
8788 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
8789 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
8790
8791 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
8792 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
8793 }
8794
8795 mptsas_start_watch_reset_delay();
8796 }
8797
8798 /*
8799 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
8800 * mpt instance for active reset delays
8801 */
8802 static void
8803 mptsas_watch_reset_delay(void *arg)
8804 {
8805 #ifndef __lock_lint
8806 _NOTE(ARGUNUSED(arg))
8807 #endif
8808
8809 mptsas_t *mpt;
8810 int not_done = 0;
8811
8812 NDBG22(("mptsas_watch_reset_delay"));
8813
8814 mutex_enter(&mptsas_global_mutex);
8815 mptsas_reset_watch = 0;
8816 mutex_exit(&mptsas_global_mutex);
8817 rw_enter(&mptsas_global_rwlock, RW_READER);
8818 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
8819 if (mpt->m_tran == 0) {
8820 continue;
8821 }
8822 mutex_enter(&mpt->m_mutex);
8823 not_done += mptsas_watch_reset_delay_subr(mpt);
8824 mutex_exit(&mpt->m_mutex);
8825 }
8826 rw_exit(&mptsas_global_rwlock);
8827
8828 if (not_done) {
8829 mptsas_start_watch_reset_delay();
8830 }
8831 }
8832
8833 static int
8834 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
8835 {
8836 int done = 0;
8837 int restart = 0;
8838 mptsas_target_t *ptgt = NULL;
8839
8840 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
8841
8842 ASSERT(mutex_owned(&mpt->m_mutex));
8843
8844 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
8845 MPTSAS_HASH_FIRST);
8846 while (ptgt != NULL) {
8847 if (ptgt->m_reset_delay != 0) {
8848 ptgt->m_reset_delay -=
8849 MPTSAS_WATCH_RESET_DELAY_TICK;
8850 if (ptgt->m_reset_delay <= 0) {
8851 ptgt->m_reset_delay = 0;
8852 mptsas_set_throttle(mpt, ptgt,
8853 MAX_THROTTLE);
8854 restart++;
8855 } else {
8856 done = -1;
8857 }
8858 }
8859
8860 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
8861 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
8862 }
8863
8864 if (restart > 0) {
8865 mptsas_restart_hba(mpt);
8866 }
8867 return (done);
8868 }
8869
8870 #ifdef MPTSAS_TEST
8871 static void
8872 mptsas_test_reset(mptsas_t *mpt, int target)
8873 {
8874 mptsas_target_t *ptgt = NULL;
8875
8876 if (mptsas_rtest == target) {
8877 if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
8878 mptsas_rtest = -1;
8879 }
8880 if (mptsas_rtest == -1) {
8881 NDBG22(("mptsas_test_reset success"));
8882 }
8883 }
8884 }
8885 #endif
8886
8887 /*
8888 * abort handling:
8889 *
8890 * Notes:
8891 * - if pkt is not NULL, abort just that command
8892 * - if pkt is NULL, abort all outstanding commands for target
8893 */
8894 static int
8895 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
8896 {
8897 mptsas_t *mpt = ADDR2MPT(ap);
8898 int rval;
8899 mptsas_tgt_private_t *tgt_private;
8900 int target, lun;
8901
8902 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
8903 tran_tgt_private;
8904 ASSERT(tgt_private != NULL);
8905 target = tgt_private->t_private->m_devhdl;
8906 lun = tgt_private->t_lun;
8907
8908 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
8909
8910 mutex_enter(&mpt->m_mutex);
8911 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
8912 mutex_exit(&mpt->m_mutex);
8913 return (rval);
8914 }
8915
/*
 * Worker for mptsas_scsi_abort(); m_mutex must be held.
 * Aborts one packet (pkt != NULL) or the whole task set for
 * target/lun (pkt == NULL).  Returns TRUE on success, FALSE otherwise.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		if (sp->cmd_queued) {
			/*
			 * Still on the waitq, so it never reached the
			 * hardware; complete it locally as aborted.
			 */
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Deliver anything queued to the done queue before returning. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
8995
8996 /*
8997 * capability handling:
8998 * (*tran_getcap). Get the capability named, and return its value.
8999 */
9000 static int
9001 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9002 {
9003 mptsas_t *mpt = ADDR2MPT(ap);
9004 int ckey;
9005 int rval = FALSE;
9006
9007 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9008 ap->a_target, cap, tgtonly));
9009
9010 mutex_enter(&mpt->m_mutex);
9011
9012 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9013 mutex_exit(&mpt->m_mutex);
9014 return (UNDEFINED);
9015 }
9016
9017 switch (ckey) {
9018 case SCSI_CAP_DMA_MAX:
9019 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9020 break;
9021 case SCSI_CAP_ARQ:
9022 rval = TRUE;
9023 break;
9024 case SCSI_CAP_MSG_OUT:
9025 case SCSI_CAP_PARITY:
9026 case SCSI_CAP_UNTAGGED_QING:
9027 rval = TRUE;
9028 break;
9029 case SCSI_CAP_TAGGED_QING:
9030 rval = TRUE;
9031 break;
9032 case SCSI_CAP_RESET_NOTIFICATION:
9033 rval = TRUE;
9034 break;
9035 case SCSI_CAP_LINKED_CMDS:
9036 rval = FALSE;
9037 break;
9038 case SCSI_CAP_QFULL_RETRIES:
9039 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9040 tran_tgt_private))->t_private->m_qfull_retries;
9041 break;
9042 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9043 rval = drv_hztousec(((mptsas_tgt_private_t *)
9044 (ap->a_hba_tran->tran_tgt_private))->
9045 t_private->m_qfull_retry_interval) / 1000;
9046 break;
9047 case SCSI_CAP_CDB_LEN:
9048 rval = CDB_GROUP4;
9049 break;
9050 case SCSI_CAP_INTERCONNECT_TYPE:
9051 rval = INTERCONNECT_SAS;
9052 break;
9053 case SCSI_CAP_TRAN_LAYER_RETRIES:
9054 if (mpt->m_ioc_capabilities &
9055 MPI2_IOCFACTS_CAPABILITY_TLR)
9056 rval = TRUE;
9057 else
9058 rval = FALSE;
9059 break;
9060 default:
9061 rval = UNDEFINED;
9062 break;
9063 }
9064
9065 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9066
9067 mutex_exit(&mpt->m_mutex);
9068 return (rval);
9069 }
9070
9071 /*
9072 * (*tran_setcap). Set the capability named to the value given.
9073 */
9074 static int
9075 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9076 {
9077 mptsas_t *mpt = ADDR2MPT(ap);
9078 int ckey;
9079 int rval = FALSE;
9080
9081 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9082 ap->a_target, cap, value, tgtonly));
9083
9084 if (!tgtonly) {
9085 return (rval);
9086 }
9087
9088 mutex_enter(&mpt->m_mutex);
9089
9090 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9091 mutex_exit(&mpt->m_mutex);
9092 return (UNDEFINED);
9093 }
9094
9095 switch (ckey) {
9096 case SCSI_CAP_DMA_MAX:
9097 case SCSI_CAP_MSG_OUT:
9098 case SCSI_CAP_PARITY:
9099 case SCSI_CAP_INITIATOR_ID:
9100 case SCSI_CAP_LINKED_CMDS:
9101 case SCSI_CAP_UNTAGGED_QING:
9102 case SCSI_CAP_RESET_NOTIFICATION:
9103 /*
9104 * None of these are settable via
9105 * the capability interface.
9106 */
9107 break;
9108 case SCSI_CAP_ARQ:
9109 /*
9110 * We cannot turn off arq so return false if asked to
9111 */
9112 if (value) {
9113 rval = TRUE;
9114 } else {
9115 rval = FALSE;
9116 }
9117 break;
9118 case SCSI_CAP_TAGGED_QING:
9119 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9120 (ap->a_hba_tran->tran_tgt_private))->t_private,
9121 MAX_THROTTLE);
9122 rval = TRUE;
9123 break;
9124 case SCSI_CAP_QFULL_RETRIES:
9125 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9126 t_private->m_qfull_retries = (uchar_t)value;
9127 rval = TRUE;
9128 break;
9129 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9130 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9131 t_private->m_qfull_retry_interval =
9132 drv_usectohz(value * 1000);
9133 rval = TRUE;
9134 break;
9135 default:
9136 rval = UNDEFINED;
9137 break;
9138 }
9139 mutex_exit(&mpt->m_mutex);
9140 return (rval);
9141 }
9142
9143 /*
9144 * Utility routine for mptsas_ifsetcap/ifgetcap
9145 */
9146 /*ARGSUSED*/
9147 static int
9148 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9149 {
9150 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9151
9152 if (!cap)
9153 return (FALSE);
9154
9155 *cidxp = scsi_hba_lookup_capstr(cap);
9156 return (TRUE);
9157 }
9158
/*
 * (Re)allocate the active-slots table sized for the current
 * m_max_requests.  Target/SMP hash tables and RAID configuration state
 * are carried over from the old table before it is freed.  Returns 0 on
 * success, -1 if allocation fails (only possible with KM_NOSLEEP).
 */
static int
mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
{
	mptsas_slots_t	*old_active = mpt->m_active;
	mptsas_slots_t	*new_active;
	size_t		size;
	int		rval = -1, i;

	/*
	 * if there are active commands, then we cannot
	 * change size of active slots array.
	 */
	ASSERT(mpt->m_ncmds == 0);

	size = MPTSAS_SLOTS_SIZE(mpt);
	new_active = kmem_zalloc(size, flag);
	if (new_active == NULL) {
		NDBG1(("new active alloc failed"));
		return (rval);
	}
	/*
	 * Since SMID 0 is reserved and the TM slot is reserved, the
	 * number of slots that can be used at any one time is
	 * m_max_requests - 2.
	 */
	new_active->m_n_slots = (mpt->m_max_requests - 2);
	new_active->m_size = size;
	new_active->m_tags = 1;
	if (old_active) {
		/* Preserve lookup tables and RAID config across realloc. */
		new_active->m_tgttbl = old_active->m_tgttbl;
		new_active->m_smptbl = old_active->m_smptbl;
		new_active->m_num_raid_configs =
		    old_active->m_num_raid_configs;
		for (i = 0; i < new_active->m_num_raid_configs; i++) {
			new_active->m_raidconfig[i] =
			    old_active->m_raidconfig[i];
		}
		mptsas_free_active_slots(mpt);
	}
	mpt->m_active = new_active;
	rval = 0;

	return (rval);
}
9203
9204 static void
9205 mptsas_free_active_slots(mptsas_t *mpt)
9206 {
9207 mptsas_slots_t *active = mpt->m_active;
9208 size_t size;
9209
9210 if (active == NULL)
9211 return;
9212 size = active->m_size;
9213 kmem_free(active, size);
9214 mpt->m_active = NULL;
9215 }
9216
9217 /*
9218 * Error logging, printing, and debug print routines.
9219 */
static char *mptsas_label = "mpt_sas";	/* label passed to scsi_log() */
9221
/*PRINTFLIKE3*/
/*
 * Format a message into the shared mptsas_log_buf (serialized by
 * mptsas_log_mutex) and emit it via scsi_log() against the instance's
 * dip, or dip 0 when mpt is NULL.
 */
void
mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
{
	dev_info_t	*dev;
	va_list	ap;

	if (mpt) {
		dev = mpt->m_dip;
	} else {
		dev = 0;
	}

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	/*
	 * NOTE(review): vsprintf() is unbounded; a sufficiently long
	 * expansion could overrun mptsas_log_buf.  Consider vsnprintf()
	 * — confirm the buffer size where mptsas_log_buf is declared.
	 */
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

	/* CE_CONT messages get an explicit trailing newline. */
	if (level == CE_CONT) {
		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
	} else {
		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
	}

	mutex_exit(&mptsas_log_mutex);
}
9249
9250 #ifdef MPTSAS_DEBUG
/*PRINTFLIKE1*/
/*
 * Debug-only printf: formats into the shared mptsas_log_buf under
 * mptsas_log_mutex and emits via prom_printf() or scsi_log() depending
 * on the PROM_PRINTF build option.  Compiled only with MPTSAS_DEBUG.
 */
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t	*dev = 0;
	va_list	ap;

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	/* NOTE(review): unbounded vsprintf — see mptsas_log(). */
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
9271 #endif
9272
9273 /*
9274 * timeout handling
9275 */
9276 static void
9277 mptsas_watch(void *arg)
9278 {
9279 #ifndef __lock_lint
9280 _NOTE(ARGUNUSED(arg))
9281 #endif
9282
9283 mptsas_t *mpt;
9284 uint32_t doorbell;
9285
9286 NDBG30(("mptsas_watch"));
9287
9288 rw_enter(&mptsas_global_rwlock, RW_READER);
9289 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9290
9291 mutex_enter(&mpt->m_mutex);
9292
9293 /* Skip device if not powered on */
9294 if (mpt->m_options & MPTSAS_OPT_PM) {
9295 if (mpt->m_power_level == PM_LEVEL_D0) {
9296 (void) pm_busy_component(mpt->m_dip, 0);
9297 mpt->m_busy = 1;
9298 } else {
9299 mutex_exit(&mpt->m_mutex);
9300 continue;
9301 }
9302 }
9303
9304 /*
9305 * Check if controller is in a FAULT state. If so, reset it.
9306 */
9307 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9308 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9309 doorbell &= MPI2_DOORBELL_DATA_MASK;
9310 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9311 "code: %04x", doorbell);
9312 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9313 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9314 mptsas_log(mpt, CE_WARN, "Reset failed"
9315 "after fault was detected");
9316 }
9317 }
9318
9319 /*
9320 * For now, always call mptsas_watchsubr.
9321 */
9322 mptsas_watchsubr(mpt);
9323
9324 if (mpt->m_options & MPTSAS_OPT_PM) {
9325 mpt->m_busy = 0;
9326 (void) pm_idle_component(mpt->m_dip, 0);
9327 }
9328
9329 mutex_exit(&mpt->m_mutex);
9330 }
9331 rw_exit(&mptsas_global_rwlock);
9332
9333 mutex_enter(&mptsas_global_mutex);
9334 if (mptsas_timeouts_enabled)
9335 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9336 mutex_exit(&mptsas_global_mutex);
9337 }
9338
/*
 * Per-instance watchdog work, called from mptsas_watch() with m_mutex
 * held: ages active commands (draining the throttle or finishing
 * passthrough/config/diag waiters on expiry) and runs per-target
 * timeout bookkeeping, killing targets that time out too often.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			/* Normal (non-IOC-internal) command aging. */
			if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot.  Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				}
			}
			/* Internal commands: wake their waiters on expiry. */
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		if ((ptgt->m_t_ncmds > 0) &&
		    (ptgt->m_timebase)) {

			/*
			 * NOTE(review): while m_timebase is still within
			 * the first watchdog tick it is bumped and the
			 * target skipped — appears to be warm-up
			 * accounting for a freshly started timer; confirm
			 * against where m_timebase is initialized.
			 */
			if (ptgt->m_timebase <=
			    mptsas_scsi_watchdog_tick) {
				ptgt->m_timebase +=
				    mptsas_scsi_watchdog_tick;
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			ptgt->m_timeout -= mptsas_scsi_watchdog_tick;

			/*
			 * Track how long since the last timeout; after
			 * mptsas_timeout_interval with none, forget past
			 * timeouts.
			 */
			if (ptgt->m_timeout_count > 0) {
				ptgt->m_timeout_interval +=
				    mptsas_scsi_watchdog_tick;
			}
			if (ptgt->m_timeout_interval >
			    mptsas_timeout_interval) {
				ptgt->m_timeout_interval = 0;
				ptgt->m_timeout_count = 0;
			}

			if (ptgt->m_timeout < 0) {
				/*
				 * Too many timeouts in a row: offline the
				 * target; otherwise attempt a device reset.
				 */
				ptgt->m_timeout_count++;
				if (ptgt->m_timeout_count >
				    mptsas_timeout_threshold) {
					ptgt->m_timeout_count = 0;
					mptsas_kill_target(mpt, ptgt);
				} else {
					mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
				}
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			if ((ptgt->m_timeout) <=
			    mptsas_scsi_watchdog_tick) {
				NDBG23(("pending timeout"));
				mptsas_set_throttle(mpt, ptgt,
				    DRAIN_THROTTLE);
			}
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
}
9456
9457 /*
9458 * timeout recovery
9459 */
9460 static void
9461 mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
9462 {
9463
9464 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
9465 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
9466 "Target %d", devhdl);
9467
9468 /*
9469 * If the current target is not the target passed in,
9470 * try to reset that target.
9471 */
9472 NDBG29(("mptsas_cmd_timeout: device reset"));
9473 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
9474 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
9475 "recovery failed!", devhdl);
9476 }
9477 }
9478
9479 /*
9480 * target causing too many timeouts
9481 */
9482 static void
9483 mptsas_kill_target(mptsas_t *mpt, mptsas_target_t *ptgt)
9484 {
9485 mptsas_topo_change_list_t *topo_node = NULL;
9486
9487 NDBG29(("mptsas_tgt_kill: target=%d", ptgt->m_devhdl));
9488 mptsas_log(mpt, CE_WARN, "timeout threshold exceeded for "
9489 "Target %d", ptgt->m_devhdl);
9490
9491 topo_node = kmem_zalloc(sizeof (mptsas_topo_change_list_t), KM_SLEEP);
9492 topo_node->mpt = mpt;
9493 topo_node->un.phymask = ptgt->m_phymask;
9494 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_TARGET;
9495 topo_node->devhdl = ptgt->m_devhdl;
9496 if (ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
9497 topo_node->flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
9498 else
9499 topo_node->flags = MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
9500 topo_node->object = NULL;
9501
9502 /*
9503 * Launch DR taskq to fake topology change
9504 */
9505 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
9506 mptsas_handle_dr, (void *)topo_node,
9507 DDI_NOSLEEP)) != DDI_SUCCESS) {
9508 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
9509 "for fake offline event failed. \n");
9510 }
9511 }
9512
9513 /*
9514 * Device / Hotplug control
9515 */
9516 static int
9517 mptsas_scsi_quiesce(dev_info_t *dip)
9518 {
9519 mptsas_t *mpt;
9520 scsi_hba_tran_t *tran;
9521
9522 tran = ddi_get_driver_private(dip);
9523 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9524 return (-1);
9525
9526 return (mptsas_quiesce_bus(mpt));
9527 }
9528
9529 static int
9530 mptsas_scsi_unquiesce(dev_info_t *dip)
9531 {
9532 mptsas_t *mpt;
9533 scsi_hba_tran_t *tran;
9534
9535 tran = ddi_get_driver_private(dip);
9536 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9537 return (-1);
9538
9539 return (mptsas_unquiesce_bus(mpt));
9540 }
9541
/*
 * Quiesce the bus: hold every target's throttle and, if commands are
 * outstanding, wait (interruptibly) for them to drain, with
 * mptsas_ncmds_checkdrain() re-asserting the holds periodically.
 * Returns 0 once quiesced, -1 if the wait was interrupted by a signal.
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Restore full throttle to every target. */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}
			mptsas_restart_hba(mpt);
			/*
			 * Cancel a still-pending drain check; m_mutex
			 * must be dropped before untimeout() since the
			 * callback takes it.
			 */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
9602
9603 static int
9604 mptsas_unquiesce_bus(mptsas_t *mpt)
9605 {
9606 mptsas_target_t *ptgt = NULL;
9607
9608 NDBG28(("mptsas_unquiesce_bus"));
9609 mutex_enter(&mpt->m_mutex);
9610 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
9611 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9612 MPTSAS_HASH_FIRST);
9613 while (ptgt != NULL) {
9614 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9615
9616 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9617 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9618 }
9619 mptsas_restart_hba(mpt);
9620 mutex_exit(&mpt->m_mutex);
9621 return (0);
9622 }
9623
/*
 * timeout() callback armed by mptsas_quiesce_bus(): signals the waiter
 * once the command count reaches zero, otherwise re-asserts the
 * throttle holds (a bus reset may have reopened them) and re-arms
 * itself.  No-op if draining is no longer in progress.
 */
static void
mptsas_ncmds_checkdrain(void *arg)
{
	mptsas_t	*mpt = arg;
	mptsas_target_t	*ptgt = NULL;

	mutex_enter(&mpt->m_mutex);
	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
		mpt->m_quiesce_timeid = 0;
		if (mpt->m_ncmds == 0) {
			/* Command queue has been drained */
			cv_signal(&mpt->m_cv);
		} else {
			/*
			 * The throttle may have been reset because
			 * of a SCSI bus reset
			 */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}

			/* Check again after another quiesce interval. */
			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
			    drv_usectohz(1000000)));
		}
	}
	mutex_exit(&mpt->m_mutex);
}
9657
9658 /*ARGSUSED*/
9659 static void
9660 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
9661 {
9662 int i;
9663 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
9664 char buf[128];
9665
9666 buf[0] = '\0';
9667 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
9668 Tgt(cmd), Lun(cmd)));
9669 (void) sprintf(&buf[0], "\tcdb=[");
9670 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
9671 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
9672 }
9673 (void) sprintf(&buf[strlen(buf)], " ]");
9674 NDBG25(("?%s\n", buf));
9675 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
9676 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
9677 cmd->cmd_pkt->pkt_state));
9678 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
9679 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
9680 }
9681
9682 static void
9683 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
9684 {
9685 caddr_t memp;
9686 pMPI2RequestHeader_t request_hdrp;
9687 struct scsi_pkt *pkt = cmd->cmd_pkt;
9688 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
9689 uint32_t request_size, data_size, dataout_size;
9690 uint32_t direction;
9691 ddi_dma_cookie_t data_cookie;
9692 ddi_dma_cookie_t dataout_cookie;
9693 uint32_t request_desc_low, request_desc_high = 0;
9694 uint32_t i, sense_bufp;
9695 uint8_t desc_type;
9696 uint8_t *request, function;
9697 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
9698 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
9699
9700 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
9701
9702 request = pt->request;
9703 direction = pt->direction;
9704 request_size = pt->request_size;
9705 data_size = pt->data_size;
9706 dataout_size = pt->dataout_size;
9707 data_cookie = pt->data_cookie;
9708 dataout_cookie = pt->dataout_cookie;
9709
9710 /*
9711 * Store the passthrough message in memory location
9712 * corresponding to our slot number
9713 */
9714 memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
9715 request_hdrp = (pMPI2RequestHeader_t)memp;
9716 bzero(memp, mpt->m_req_frame_size);
9717
9718 for (i = 0; i < request_size; i++) {
9719 bcopy(request + i, memp + i, 1);
9720 }
9721
9722 if (data_size || dataout_size) {
9723 pMpi2SGESimple64_t sgep;
9724 uint32_t sge_flags;
9725
9726 sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
9727 request_size);
9728 if (dataout_size) {
9729
9730 sge_flags = dataout_size |
9731 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9732 MPI2_SGE_FLAGS_END_OF_BUFFER |
9733 MPI2_SGE_FLAGS_HOST_TO_IOC |
9734 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9735 MPI2_SGE_FLAGS_SHIFT);
9736 ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
9737 ddi_put32(acc_hdl, &sgep->Address.Low,
9738 (uint32_t)(dataout_cookie.dmac_laddress &
9739 0xffffffffull));
9740 ddi_put32(acc_hdl, &sgep->Address.High,
9741 (uint32_t)(dataout_cookie.dmac_laddress
9742 >> 32));
9743 sgep++;
9744 }
9745 sge_flags = data_size;
9746 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
9747 MPI2_SGE_FLAGS_LAST_ELEMENT |
9748 MPI2_SGE_FLAGS_END_OF_BUFFER |
9749 MPI2_SGE_FLAGS_END_OF_LIST |
9750 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
9751 MPI2_SGE_FLAGS_SHIFT);
9752 if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
9753 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
9754 MPI2_SGE_FLAGS_SHIFT);
9755 } else {
9756 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
9757 MPI2_SGE_FLAGS_SHIFT);
9758 }
9759 ddi_put32(acc_hdl, &sgep->FlagsLength,
9760 sge_flags);
9761 ddi_put32(acc_hdl, &sgep->Address.Low,
9762 (uint32_t)(data_cookie.dmac_laddress &
9763 0xffffffffull));
9764 ddi_put32(acc_hdl, &sgep->Address.High,
9765 (uint32_t)(data_cookie.dmac_laddress >> 32));
9766 }
9767
9768 function = request_hdrp->Function;
9769 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
9770 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
9771 pMpi2SCSIIORequest_t scsi_io_req;
9772
9773 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
9774 /*
9775 * Put SGE for data and data_out buffer at the end of
9776 * scsi_io_request message header.(64 bytes in total)
9777 * Following above SGEs, the residual space will be
9778 * used by sense data.
9779 */
9780 ddi_put8(acc_hdl,
9781 &scsi_io_req->SenseBufferLength,
9782 (uint8_t)(request_size - 64));
9783
9784 sense_bufp = mpt->m_req_frame_dma_addr +
9785 (mpt->m_req_frame_size * cmd->cmd_slot);
9786 sense_bufp += 64;
9787 ddi_put32(acc_hdl,
9788 &scsi_io_req->SenseBufferLowAddress, sense_bufp);
9789
9790 /*
9791 * Set SGLOffset0 value
9792 */
9793 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
9794 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
9795
9796 /*
9797 * Setup descriptor info. RAID passthrough must use the
9798 * default request descriptor which is already set, so if this
9799 * is a SCSI IO request, change the descriptor to SCSI IO.
9800 */
9801 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
9802 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
9803 request_desc_high = (ddi_get16(acc_hdl,
9804 &scsi_io_req->DevHandle) << 16);
9805 }
9806 }
9807
9808 /*
9809 * We must wait till the message has been completed before
9810 * beginning the next message so we wait for this one to
9811 * finish.
9812 */
9813 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
9814 request_desc_low = (cmd->cmd_slot << 16) + desc_type;
9815 cmd->cmd_rfm = NULL;
9816 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
9817 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
9818 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
9819 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
9820 }
9821 }
9822
9823
9824
/*
 * Generic MPI2 passthrough: copy in a caller-supplied request, issue it to
 * the IOC via a pool command, wait for completion, and copy the reply (plus
 * any sense data and read data) back out to the caller.
 *
 * request/reply/data/dataout are addresses interpreted per 'mode'
 * (ddi_copyin/ddi_copyout).  direction is one of the
 * MPTSAS_PASS_THRU_DIRECTION_* values.  Returns 0 or an errno value.
 *
 * Entered and exited with m_mutex held (see the ASSERT); the mutex is
 * dropped and re-acquired around every user copy operation.
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint32_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t		pt;
	mptsas_dma_alloc_state_t	data_dma_state;
	mptsas_dma_alloc_state_t	dataout_dma_state;
	caddr_t				memp;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	uint32_t			reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t		request_hdrp;
	pMPI2RequestHeader_t		request_msg;
	pMPI2DefaultReply_t		reply_msg;
	Mpi2SCSIIOReply_t		rep_msg;
	int				i, status = 0, pt_flags = 0, rv = 0;
	int				rvalue;
	uint8_t				function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Default reply is the zeroed stack copy until an ADDRESS reply. */
	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	mutex_enter(&mpt->m_mutex);

	/*
	 * Task management requests are handled synchronously right here
	 * rather than being queued like other passthrough functions.
	 */
	function = request_msg->Function;
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		pMpi2SCSITaskManagementRequest_t	task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	/* Allocate a DMA buffer for data-in (IOC-to-host) transfers. */
	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		/* WRITE direction: the "data" buffer carries host data in. */
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	}

	/* Allocate and fill the separate data-out buffer, if requested. */
	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	}

	/* Grab a cmd/pkt pair from the IOC command pool. */
	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	/*
	 * pt is stack-local; mptsas_start_passthru() consumes it via
	 * pkt_ha_private before this function returns.
	 */
	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private = (opaque_t)&pt;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = timeout;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		/* No slot free now; queue it and let the restart path run. */
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the interrupt/timeout path marks the cmd finished. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	/* Locate the request frame used, for sense data extraction below. */
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    mpt->m_reply_frame_dma_addr));
	}

	/* Map FMA/transport failures onto errno values for the caller. */
	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		/*
		 * For SCSI IO functions the reply is a fixed-size structure
		 * and the remainder of the user's reply buffer receives
		 * sense data (which lives at offset 64 in the request frame,
		 * see mptsas_start_passthru()).
		 */
		function = request_hdrp->Function;
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			sense_len = reply_size - reply_len;
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	/* Copy read data back to the caller for non-WRITE transfers. */
	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i, 1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* A prepared cmd holds a slot; removing it also returns the pool. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	/* A timed-out passthrough leaves the IOC suspect; restart it. */
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);

	return (status);
}
10130
10131 static int
10132 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10133 {
10134 /*
10135 * If timeout is 0, set timeout to default of 60 seconds.
10136 */
10137 if (data->Timeout == 0) {
10138 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10139 }
10140
10141 if (((data->DataSize == 0) &&
10142 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10143 ((data->DataSize != 0) &&
10144 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10145 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10146 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10147 (data->DataOutSize != 0))))) {
10148 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10149 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10150 } else {
10151 data->DataOutSize = 0;
10152 }
10153 /*
10154 * Send passthru request messages
10155 */
10156 return (mptsas_do_passthru(mpt,
10157 (uint8_t *)((uintptr_t)data->PtrRequest),
10158 (uint8_t *)((uintptr_t)data->PtrReply),
10159 (uint8_t *)((uintptr_t)data->PtrData),
10160 data->RequestSize, data->ReplySize,
10161 data->DataSize, data->DataDirection,
10162 (uint8_t *)((uintptr_t)data->PtrDataOut),
10163 data->DataOutSize, data->Timeout, mode));
10164 } else {
10165 return (EINVAL);
10166 }
10167 }
10168
10169 static uint8_t
10170 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
10171 {
10172 uint8_t index;
10173
10174 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
10175 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
10176 return (index);
10177 }
10178 }
10179
10180 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
10181 }
10182
/*
 * Build a FW diagnostic buffer POST or RELEASE request in the request
 * frame for this command's slot and issue it to the IOC.  The
 * mptsas_diag_request_t hung off the packet (by the post/release routines
 * below) selects which message to form and supplies the buffer details.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
	struct scsi_pkt			*pkt = cmd->cmd_pkt;
	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
	uint32_t			request_desc_low, i;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* ProductSpecific data is copied one 32-bit word at a time. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* 64-bit DMA address of the diag buffer, split low/high. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);
	/* Check DMA/access handles after touching hardware (FMA). */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
10254
/*
 * Post a FW diagnostic buffer to the IOC and wait for the reply.  On
 * success, marks the buffer valid and owned by firmware and sets
 * *return_code to MPTSAS_FW_DIAG_ERROR_SUCCESS.  Returns DDI_SUCCESS or
 * DDI_FAILURE.  Expects m_mutex to be held (it cv_waits on it).
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t		diag;
	int				status, slot_num, post_flags = 0;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	pMpi2DiagBufferPostReply_t	reply;
	uint16_t			iocstatus;
	uint32_t			iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/*
	 * diag is stack-local; mptsas_start_diag() consumes it via
	 * pkt_ha_private before this function returns.
	 */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		/* No slot free; queue the cmd for the restart path. */
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the interrupt/timeout path marks the cmd finished. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* A prepared cmd holds a slot; removing it also returns the pool. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
10404
/*
 * Ask the IOC to release a FW diagnostic buffer and wait for the reply.
 * On success sets *return_code to MPTSAS_FW_DIAG_ERROR_SUCCESS and, for
 * an UNREGISTER diag_type, also clears the buffer's unique ID.  Returns
 * DDI_SUCCESS or DDI_FAILURE.  Expects m_mutex to be held (it cv_waits
 * on it).
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t	diag;
	int			status, slot_num, rel_flags = 0;
	mptsas_cmd_t		*cmd = NULL;
	struct scsi_pkt		*pkt;
	pMpi2DiagReleaseReply_t	reply;
	uint16_t		iocstatus;
	uint32_t		iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/*
	 * diag is stack-local; mptsas_start_diag() consumes it via
	 * pkt_ha_private before this function returns.
	 */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		/* No slot free; queue the cmd for the restart path. */
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the interrupt/timeout path marks the cmd finished. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* A prepared cmd holds a slot; removing it also returns the pool. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
10563
10564 static int
10565 mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
10566 uint32_t *return_code)
10567 {
10568 mptsas_fw_diagnostic_buffer_t *pBuffer;
10569 uint8_t extended_type, buffer_type, i;
10570 uint32_t buffer_size;
10571 uint32_t unique_id;
10572 int status;
10573
10574 ASSERT(mutex_owned(&mpt->m_mutex));
10575
10576 extended_type = diag_register->ExtendedType;
10577 buffer_type = diag_register->BufferType;
10578 buffer_size = diag_register->RequestedBufferSize;
10579 unique_id = diag_register->UniqueId;
10580
10581 /*
10582 * Check for valid buffer type
10583 */
10584 if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
10585 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10586 return (DDI_FAILURE);
10587 }
10588
10589 /*
10590 * Get the current buffer and look up the unique ID. The unique ID
10591 * should not be found. If it is, the ID is already in use.
10592 */
10593 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10594 pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
10595 if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10596 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10597 return (DDI_FAILURE);
10598 }
10599
10600 /*
10601 * The buffer's unique ID should not be registered yet, and the given
10602 * unique ID cannot be 0.
10603 */
10604 if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
10605 (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
10606 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10607 return (DDI_FAILURE);
10608 }
10609
10610 /*
10611 * If this buffer is already posted as immediate, just change owner.
10612 */
10613 if (pBuffer->immediate && pBuffer->owned_by_firmware &&
10614 (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
10615 pBuffer->immediate = FALSE;
10616 pBuffer->unique_id = unique_id;
10617 return (DDI_SUCCESS);
10618 }
10619
10620 /*
10621 * Post a new buffer after checking if it's enabled. The DMA buffer
10622 * that is allocated will be contiguous (sgl_len = 1).
10623 */
10624 if (!pBuffer->enabled) {
10625 *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
10626 return (DDI_FAILURE);
10627 }
10628 bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
10629 pBuffer->buffer_data.size = buffer_size;
10630 if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
10631 mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
10632 "diag buffer: size = %d bytes", buffer_size);
10633 *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
10634 return (DDI_FAILURE);
10635 }
10636
10637 /*
10638 * Copy the given info to the diag buffer and post the buffer.
10639 */
10640 pBuffer->buffer_type = buffer_type;
10641 pBuffer->immediate = FALSE;
10642 if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
10643 for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
10644 i++) {
10645 pBuffer->product_specific[i] =
10646 diag_register->ProductSpecific[i];
10647 }
10648 }
10649 pBuffer->extended_type = extended_type;
10650 pBuffer->unique_id = unique_id;
10651 status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);
10652
10653 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10654 DDI_SUCCESS) {
10655 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
10656 "mptsas_diag_register.");
10657 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10658 status = DDI_FAILURE;
10659 }
10660
10661 /*
10662 * In case there was a failure, free the DMA buffer.
10663 */
10664 if (status == DDI_FAILURE) {
10665 mptsas_dma_free(&pBuffer->buffer_data);
10666 }
10667
10668 return (status);
10669 }
10670
10671 static int
10672 mptsas_diag_unregister(mptsas_t *mpt,
10673 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
10674 {
10675 mptsas_fw_diagnostic_buffer_t *pBuffer;
10676 uint8_t i;
10677 uint32_t unique_id;
10678 int status;
10679
10680 ASSERT(mutex_owned(&mpt->m_mutex));
10681
10682 unique_id = diag_unregister->UniqueId;
10683
10684 /*
10685 * Get the current buffer and look up the unique ID. The unique ID
10686 * should be there.
10687 */
10688 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10689 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10690 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10691 return (DDI_FAILURE);
10692 }
10693
10694 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10695
10696 /*
10697 * Try to release the buffer from FW before freeing it. If release
10698 * fails, don't free the DMA buffer in case FW tries to access it
10699 * later. If buffer is not owned by firmware, can't release it.
10700 */
10701 if (!pBuffer->owned_by_firmware) {
10702 status = DDI_SUCCESS;
10703 } else {
10704 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
10705 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
10706 }
10707
10708 /*
10709 * At this point, return the current status no matter what happens with
10710 * the DMA buffer.
10711 */
10712 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
10713 if (status == DDI_SUCCESS) {
10714 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
10715 DDI_SUCCESS) {
10716 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
10717 "in mptsas_diag_unregister.");
10718 ddi_fm_service_impact(mpt->m_dip,
10719 DDI_SERVICE_UNAFFECTED);
10720 }
10721 mptsas_dma_free(&pBuffer->buffer_data);
10722 }
10723
10724 return (status);
10725 }
10726
10727 static int
10728 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
10729 uint32_t *return_code)
10730 {
10731 mptsas_fw_diagnostic_buffer_t *pBuffer;
10732 uint8_t i;
10733 uint32_t unique_id;
10734
10735 ASSERT(mutex_owned(&mpt->m_mutex));
10736
10737 unique_id = diag_query->UniqueId;
10738
10739 /*
10740 * If ID is valid, query on ID.
10741 * If ID is invalid, query on buffer type.
10742 */
10743 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
10744 i = diag_query->BufferType;
10745 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
10746 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10747 return (DDI_FAILURE);
10748 }
10749 } else {
10750 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10751 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10752 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10753 return (DDI_FAILURE);
10754 }
10755 }
10756
10757 /*
10758 * Fill query structure with the diag buffer info.
10759 */
10760 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10761 diag_query->BufferType = pBuffer->buffer_type;
10762 diag_query->ExtendedType = pBuffer->extended_type;
10763 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
10764 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
10765 i++) {
10766 diag_query->ProductSpecific[i] =
10767 pBuffer->product_specific[i];
10768 }
10769 }
10770 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
10771 diag_query->DriverAddedBufferSize = 0;
10772 diag_query->UniqueId = pBuffer->unique_id;
10773 diag_query->ApplicationFlags = 0;
10774 diag_query->DiagnosticFlags = 0;
10775
10776 /*
10777 * Set/Clear application flags
10778 */
10779 if (pBuffer->immediate) {
10780 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10781 } else {
10782 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
10783 }
10784 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
10785 diag_query->ApplicationFlags |=
10786 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10787 } else {
10788 diag_query->ApplicationFlags &=
10789 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
10790 }
10791 if (pBuffer->owned_by_firmware) {
10792 diag_query->ApplicationFlags |=
10793 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10794 } else {
10795 diag_query->ApplicationFlags &=
10796 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
10797 }
10798
10799 return (DDI_SUCCESS);
10800 }
10801
10802 static int
10803 mptsas_diag_read_buffer(mptsas_t *mpt,
10804 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
10805 uint32_t *return_code, int ioctl_mode)
10806 {
10807 mptsas_fw_diagnostic_buffer_t *pBuffer;
10808 uint8_t i, *pData;
10809 uint32_t unique_id, byte;
10810 int status;
10811
10812 ASSERT(mutex_owned(&mpt->m_mutex));
10813
10814 unique_id = diag_read_buffer->UniqueId;
10815
10816 /*
10817 * Get the current buffer and look up the unique ID. The unique ID
10818 * should be there.
10819 */
10820 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10821 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10822 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10823 return (DDI_FAILURE);
10824 }
10825
10826 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10827
10828 /*
10829 * Make sure requested read is within limits
10830 */
10831 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
10832 pBuffer->buffer_data.size) {
10833 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10834 return (DDI_FAILURE);
10835 }
10836
10837 /*
10838 * Copy the requested data from DMA to the diag_read_buffer. The DMA
10839 * buffer that was allocated is one contiguous buffer.
10840 */
10841 pData = (uint8_t *)(pBuffer->buffer_data.memp +
10842 diag_read_buffer->StartingOffset);
10843 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
10844 DDI_DMA_SYNC_FORCPU);
10845 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
10846 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
10847 != 0) {
10848 return (DDI_FAILURE);
10849 }
10850 }
10851 diag_read_buffer->Status = 0;
10852
10853 /*
10854 * Set or clear the Force Release flag.
10855 */
10856 if (pBuffer->force_release) {
10857 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10858 } else {
10859 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
10860 }
10861
10862 /*
10863 * If buffer is to be reregistered, make sure it's not already owned by
10864 * firmware first.
10865 */
10866 status = DDI_SUCCESS;
10867 if (!pBuffer->owned_by_firmware) {
10868 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
10869 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
10870 return_code);
10871 }
10872 }
10873
10874 return (status);
10875 }
10876
10877 static int
10878 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
10879 uint32_t *return_code)
10880 {
10881 mptsas_fw_diagnostic_buffer_t *pBuffer;
10882 uint8_t i;
10883 uint32_t unique_id;
10884 int status;
10885
10886 ASSERT(mutex_owned(&mpt->m_mutex));
10887
10888 unique_id = diag_release->UniqueId;
10889
10890 /*
10891 * Get the current buffer and look up the unique ID. The unique ID
10892 * should be there.
10893 */
10894 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
10895 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
10896 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
10897 return (DDI_FAILURE);
10898 }
10899
10900 pBuffer = &mpt->m_fw_diag_buffer_list[i];
10901
10902 /*
10903 * If buffer is not owned by firmware, it's already been released.
10904 */
10905 if (!pBuffer->owned_by_firmware) {
10906 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
10907 return (DDI_FAILURE);
10908 }
10909
10910 /*
10911 * Release the buffer.
10912 */
10913 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
10914 MPTSAS_FW_DIAG_TYPE_RELEASE);
10915 return (status);
10916 }
10917
10918 static int
10919 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
10920 uint32_t length, uint32_t *return_code, int ioctl_mode)
10921 {
10922 mptsas_fw_diag_register_t diag_register;
10923 mptsas_fw_diag_unregister_t diag_unregister;
10924 mptsas_fw_diag_query_t diag_query;
10925 mptsas_diag_read_buffer_t diag_read_buffer;
10926 mptsas_fw_diag_release_t diag_release;
10927 int status = DDI_SUCCESS;
10928 uint32_t original_return_code, read_buf_len;
10929
10930 ASSERT(mutex_owned(&mpt->m_mutex));
10931
10932 original_return_code = *return_code;
10933 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
10934
10935 switch (action) {
10936 case MPTSAS_FW_DIAG_TYPE_REGISTER:
10937 if (!length) {
10938 *return_code =
10939 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10940 status = DDI_FAILURE;
10941 break;
10942 }
10943 if (ddi_copyin(diag_action, &diag_register,
10944 sizeof (diag_register), ioctl_mode) != 0) {
10945 return (DDI_FAILURE);
10946 }
10947 status = mptsas_diag_register(mpt, &diag_register,
10948 return_code);
10949 break;
10950
10951 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
10952 if (length < sizeof (diag_unregister)) {
10953 *return_code =
10954 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10955 status = DDI_FAILURE;
10956 break;
10957 }
10958 if (ddi_copyin(diag_action, &diag_unregister,
10959 sizeof (diag_unregister), ioctl_mode) != 0) {
10960 return (DDI_FAILURE);
10961 }
10962 status = mptsas_diag_unregister(mpt, &diag_unregister,
10963 return_code);
10964 break;
10965
10966 case MPTSAS_FW_DIAG_TYPE_QUERY:
10967 if (length < sizeof (diag_query)) {
10968 *return_code =
10969 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10970 status = DDI_FAILURE;
10971 break;
10972 }
10973 if (ddi_copyin(diag_action, &diag_query,
10974 sizeof (diag_query), ioctl_mode) != 0) {
10975 return (DDI_FAILURE);
10976 }
10977 status = mptsas_diag_query(mpt, &diag_query,
10978 return_code);
10979 if (status == DDI_SUCCESS) {
10980 if (ddi_copyout(&diag_query, diag_action,
10981 sizeof (diag_query), ioctl_mode) != 0) {
10982 return (DDI_FAILURE);
10983 }
10984 }
10985 break;
10986
10987 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
10988 if (ddi_copyin(diag_action, &diag_read_buffer,
10989 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
10990 return (DDI_FAILURE);
10991 }
10992 read_buf_len = sizeof (diag_read_buffer) -
10993 sizeof (diag_read_buffer.DataBuffer) +
10994 diag_read_buffer.BytesToRead;
10995 if (length < read_buf_len) {
10996 *return_code =
10997 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
10998 status = DDI_FAILURE;
10999 break;
11000 }
11001 status = mptsas_diag_read_buffer(mpt,
11002 &diag_read_buffer, diag_action +
11003 sizeof (diag_read_buffer) - 4, return_code,
11004 ioctl_mode);
11005 if (status == DDI_SUCCESS) {
11006 if (ddi_copyout(&diag_read_buffer, diag_action,
11007 sizeof (diag_read_buffer) - 4, ioctl_mode)
11008 != 0) {
11009 return (DDI_FAILURE);
11010 }
11011 }
11012 break;
11013
11014 case MPTSAS_FW_DIAG_TYPE_RELEASE:
11015 if (length < sizeof (diag_release)) {
11016 *return_code =
11017 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11018 status = DDI_FAILURE;
11019 break;
11020 }
11021 if (ddi_copyin(diag_action, &diag_release,
11022 sizeof (diag_release), ioctl_mode) != 0) {
11023 return (DDI_FAILURE);
11024 }
11025 status = mptsas_diag_release(mpt, &diag_release,
11026 return_code);
11027 break;
11028
11029 default:
11030 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11031 status = DDI_FAILURE;
11032 break;
11033 }
11034
11035 if ((status == DDI_FAILURE) &&
11036 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
11037 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
11038 status = DDI_SUCCESS;
11039 }
11040
11041 return (status);
11042 }
11043
11044 static int
11045 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
11046 {
11047 int status;
11048 mptsas_diag_action_t driver_data;
11049
11050 ASSERT(mutex_owned(&mpt->m_mutex));
11051
11052 /*
11053 * Copy the user data to a driver data buffer.
11054 */
11055 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
11056 mode) == 0) {
11057 /*
11058 * Send diag action request if Action is valid
11059 */
11060 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
11061 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
11062 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
11063 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
11064 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
11065 status = mptsas_do_diag_action(mpt, driver_data.Action,
11066 (void *)(uintptr_t)driver_data.PtrDiagAction,
11067 driver_data.Length, &driver_data.ReturnCode,
11068 mode);
11069 if (status == DDI_SUCCESS) {
11070 if (ddi_copyout(&driver_data.ReturnCode,
11071 &user_data->ReturnCode,
11072 sizeof (user_data->ReturnCode), mode)
11073 != 0) {
11074 status = EFAULT;
11075 } else {
11076 status = 0;
11077 }
11078 } else {
11079 status = EIO;
11080 }
11081 } else {
11082 status = EINVAL;
11083 }
11084 } else {
11085 status = EFAULT;
11086 }
11087
11088 return (status);
11089 }
11090
11091 /*
11092 * This routine handles the "event query" ioctl.
11093 */
11094 static int
11095 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11096 int *rval)
11097 {
11098 int status;
11099 mptsas_event_query_t driverdata;
11100 uint8_t i;
11101
11102 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11103
11104 mutex_enter(&mpt->m_mutex);
11105 for (i = 0; i < 4; i++) {
11106 driverdata.Types[i] = mpt->m_event_mask[i];
11107 }
11108 mutex_exit(&mpt->m_mutex);
11109
11110 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11111 status = EFAULT;
11112 } else {
11113 *rval = MPTIOCTL_STATUS_GOOD;
11114 status = 0;
11115 }
11116
11117 return (status);
11118 }
11119
11120 /*
11121 * This routine handles the "event enable" ioctl.
11122 */
11123 static int
11124 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11125 int *rval)
11126 {
11127 int status;
11128 mptsas_event_enable_t driverdata;
11129 uint8_t i;
11130
11131 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11132 mutex_enter(&mpt->m_mutex);
11133 for (i = 0; i < 4; i++) {
11134 mpt->m_event_mask[i] = driverdata.Types[i];
11135 }
11136 mutex_exit(&mpt->m_mutex);
11137
11138 *rval = MPTIOCTL_STATUS_GOOD;
11139 status = 0;
11140 } else {
11141 status = EFAULT;
11142 }
11143 return (status);
11144 }
11145
11146 /*
11147 * This routine handles the "event report" ioctl.
11148 */
11149 static int
11150 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
11151 int *rval)
11152 {
11153 int status;
11154 mptsas_event_report_t driverdata;
11155
11156 mutex_enter(&mpt->m_mutex);
11157
11158 if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
11159 mode) == 0) {
11160 if (driverdata.Size >= sizeof (mpt->m_events)) {
11161 if (ddi_copyout(mpt->m_events, data->Events,
11162 sizeof (mpt->m_events), mode) != 0) {
11163 status = EFAULT;
11164 } else {
11165 if (driverdata.Size > sizeof (mpt->m_events)) {
11166 driverdata.Size =
11167 sizeof (mpt->m_events);
11168 if (ddi_copyout(&driverdata.Size,
11169 &data->Size,
11170 sizeof (driverdata.Size),
11171 mode) != 0) {
11172 status = EFAULT;
11173 } else {
11174 *rval = MPTIOCTL_STATUS_GOOD;
11175 status = 0;
11176 }
11177 } else {
11178 *rval = MPTIOCTL_STATUS_GOOD;
11179 status = 0;
11180 }
11181 }
11182 } else {
11183 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11184 status = 0;
11185 }
11186 } else {
11187 status = EFAULT;
11188 }
11189
11190 mutex_exit(&mpt->m_mutex);
11191 return (status);
11192 }
11193
11194 static void
11195 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11196 {
11197 int *reg_data;
11198 uint_t reglen;
11199
11200 /*
11201 * Lookup the 'reg' property and extract the other data
11202 */
11203 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11204 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11205 DDI_PROP_SUCCESS) {
11206 /*
11207 * Extract the PCI data from the 'reg' property first DWORD.
11208 * The entry looks like the following:
11209 * First DWORD:
11210 * Bits 0 - 7 8-bit Register number
11211 * Bits 8 - 10 3-bit Function number
11212 * Bits 11 - 15 5-bit Device number
11213 * Bits 16 - 23 8-bit Bus number
11214 * Bits 24 - 25 2-bit Address Space type identifier
11215 *
11216 */
11217 adapter_data->PciInformation.u.bits.BusNumber =
11218 (reg_data[0] & 0x00FF0000) >> 16;
11219 adapter_data->PciInformation.u.bits.DeviceNumber =
11220 (reg_data[0] & 0x0000F800) >> 11;
11221 adapter_data->PciInformation.u.bits.FunctionNumber =
11222 (reg_data[0] & 0x00000700) >> 8;
11223 ddi_prop_free((void *)reg_data);
11224 } else {
11225 /*
11226 * If we can't determine the PCI data then we fill in FF's for
11227 * the data to indicate this.
11228 */
11229 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
11230 adapter_data->MpiPortNumber = 0xFFFFFFFF;
11231 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
11232 }
11233
11234 /*
11235 * Saved in the mpt->m_fwversion
11236 */
11237 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
11238 }
11239
11240 static void
11241 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11242 {
11243 char *driver_verstr = MPTSAS_MOD_STRING;
11244
11245 mptsas_lookup_pci_data(mpt, adapter_data);
11246 adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
11247 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
11248 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
11249 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
11250 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
11251 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
11252 adapter_data->BiosVersion = 0;
11253 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
11254 }
11255
11256 static void
11257 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
11258 {
11259 int *reg_data, i;
11260 uint_t reglen;
11261
11262 /*
11263 * Lookup the 'reg' property and extract the other data
11264 */
11265 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11266 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11267 DDI_PROP_SUCCESS) {
11268 /*
11269 * Extract the PCI data from the 'reg' property first DWORD.
11270 * The entry looks like the following:
11271 * First DWORD:
11272 * Bits 8 - 10 3-bit Function number
11273 * Bits 11 - 15 5-bit Device number
11274 * Bits 16 - 23 8-bit Bus number
11275 */
11276 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
11277 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
11278 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
11279 ddi_prop_free((void *)reg_data);
11280 } else {
11281 /*
11282 * If we can't determine the PCI info then we fill in FF's for
11283 * the data to indicate this.
11284 */
11285 pci_info->BusNumber = 0xFFFFFFFF;
11286 pci_info->DeviceNumber = 0xFF;
11287 pci_info->FunctionNumber = 0xFF;
11288 }
11289
11290 /*
11291 * Now get the interrupt vector and the pci header. The vector can
11292 * only be 0 right now. The header is the first 256 bytes of config
11293 * space.
11294 */
11295 pci_info->InterruptVector = 0;
11296 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
11297 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
11298 i);
11299 }
11300 }
11301
/*
 * Handler for the MPTIOCTL_REG_ACCESS ioctl: performs a single 32-bit
 * read or write of the controller's memory-mapped register space on
 * behalf of a user diagnostic tool.  IO-space access is rejected.
 *
 * Returns 0 on success, EINVAL for unsupported/unknown commands, or
 * EFAULT if the user buffer cannot be copied in/out.
 */
static int
mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
{
	int status = 0;
	mptsas_reg_access_t driverdata;

	mutex_enter(&mpt->m_mutex);
	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
		switch (driverdata.Command) {
		/*
		 * IO access is not supported.
		 */
		case REG_IO_READ:
		case REG_IO_WRITE:
			mptsas_log(mpt, CE_WARN, "IO access is not "
			    "supported. Use memory access.");
			status = EINVAL;
			break;

		case REG_MEM_READ:
			/*
			 * RegOffset is applied via pointer arithmetic on a
			 * uint32_t *, i.e. it is a 32-bit-word offset from
			 * the base of the mapped register space, not a byte
			 * offset.  NOTE(review): the offset comes straight
			 * from userland and is not range-checked here --
			 * confirm this is acceptable given that the ioctl
			 * path already requires sys_config privilege.
			 */
			driverdata.RegData = ddi_get32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset);
			if (ddi_copyout(&driverdata.RegData,
			    &data->RegData,
			    sizeof (driverdata.RegData), mode) != 0) {
				mptsas_log(mpt, CE_WARN, "Register "
				    "Read Failed");
				status = EFAULT;
			}
			break;

		case REG_MEM_WRITE:
			/* Same word-offset semantics as REG_MEM_READ. */
			ddi_put32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset,
			    driverdata.RegData);
			break;

		default:
			status = EINVAL;
			break;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
11352
/*
 * Main ioctl(9E) entry point for the mptsas driver.  Resolves the soft
 * state (either directly from the minor number, or via the iport child
 * node), raises the device to full power if power management is
 * active, and then dispatches the MPTIOCTL_* command.  Unrecognized
 * commands (and all iport-node ioctls) are passed to scsi_hba_ioctl().
 *
 * Returns 0 on success or an errno value; *rval carries the
 * MPTIOCTL_STATUS_* result for commands that report one.
 */
static int
mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
    int *rval)
{
	int status = 0;
	mptsas_t *mpt;
	mptsas_update_flash_t flashdata;
	mptsas_pass_thru_t passthru_data;
	mptsas_adapter_data_t adapter_data;
	mptsas_pci_info_t pci_info;
	int copylen;

	int iport_flag = 0;
	dev_info_t *dip = NULL;
	mptsas_phymask_t phymask = 0;

	*rval = MPTIOCTL_STATUS_GOOD;
	/* All mptsas ioctls require system-configuration privilege. */
	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
		return (EPERM);
	}

	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
	if (mpt == NULL) {
		/*
		 * Called from iport node, get the states
		 */
		iport_flag = 1;
		dip = mptsas_get_dip_from_dev(dev, &phymask);
		if (dip == NULL) {
			return (ENXIO);
		}
		mpt = DIP2MPT(dip);
	}
	/* Make sure power level is D0 before accessing registers */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_options & MPTSAS_OPT_PM) {
		/*
		 * NOTE(review): pm_busy_component() is not paired with a
		 * pm_idle_component() on the success paths of this
		 * function -- confirm the idle call happens elsewhere
		 * (e.g. a watch/timeout routine) before changing this.
		 */
		(void) pm_busy_component(mpt->m_dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			mutex_exit(&mpt->m_mutex);
			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: mptsas_ioctl: Raise power "
				    "request failed.", mpt->m_instance);
				(void) pm_idle_component(mpt->m_dip, 0);
				return (ENXIO);
			}
		} else {
			mutex_exit(&mpt->m_mutex);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	/* iport-node ioctls are all handled by the SCSA framework. */
	if (iport_flag) {
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
		goto out;
	}
	switch (cmd) {
	case MPTIOCTL_UPDATE_FLASH:
		if (ddi_copyin((void *)data, &flashdata,
		    sizeof (struct mptsas_update_flash), mode)) {
			status = EFAULT;
			break;
		}

		mutex_enter(&mpt->m_mutex);
		if (mptsas_update_flash(mpt,
		    (caddr_t)(long)flashdata.PtrBuffer,
		    flashdata.ImageSize, flashdata.ImageType, mode)) {
			status = EFAULT;
		}

		/*
		 * Reset the chip to start using the new
		 * firmware.  Reset if failed also.
		 */
		mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
		if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
			status = EFAULT;
		}
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_PASS_THRU:
		/*
		 * The user has requested to pass through a command to
		 * be executed by the MPT firmware.  Call our routine
		 * which does this.  Only allow one passthru IOCTL at
		 * one time. Other threads will block on
		 * m_passthru_mutex, which is of adaptive variant.
		 */
		if (ddi_copyin((void *)data, &passthru_data,
		    sizeof (mptsas_pass_thru_t), mode)) {
			status = EFAULT;
			break;
		}
		mutex_enter(&mpt->m_passthru_mutex);
		mutex_enter(&mpt->m_mutex);
		status = mptsas_pass_thru(mpt, &passthru_data, mode);
		mutex_exit(&mpt->m_mutex);
		mutex_exit(&mpt->m_passthru_mutex);

		break;
	case MPTIOCTL_GET_ADAPTER_DATA:
		/*
		 * The user has requested to read adapter data.  Call
		 * our routine which does this.  If the caller's struct
		 * is too short, only the (corrected) StructureLength is
		 * copied back, along with a LEN_TOO_SHORT status.
		 */
		bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
		if (ddi_copyin((void *)data, (void *)&adapter_data,
		    sizeof (mptsas_adapter_data_t), mode)) {
			status = EFAULT;
			break;
		}
		if (adapter_data.StructureLength >=
		    sizeof (mptsas_adapter_data_t)) {
			adapter_data.StructureLength = (uint32_t)
			    sizeof (mptsas_adapter_data_t);
			copylen = sizeof (mptsas_adapter_data_t);
			mutex_enter(&mpt->m_mutex);
			mptsas_read_adapter_data(mpt, &adapter_data);
			mutex_exit(&mpt->m_mutex);
		} else {
			adapter_data.StructureLength = (uint32_t)
			    sizeof (mptsas_adapter_data_t);
			copylen = sizeof (adapter_data.StructureLength);
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
		}
		if (ddi_copyout((void *)(&adapter_data), (void *)data,
		    copylen, mode) != 0) {
			status = EFAULT;
		}
		break;
	case MPTIOCTL_GET_PCI_INFO:
		/*
		 * The user has requested to read pci info.  Call
		 * our routine which does this.
		 */
		bzero(&pci_info, sizeof (mptsas_pci_info_t));
		mutex_enter(&mpt->m_mutex);
		mptsas_read_pci_info(mpt, &pci_info);
		mutex_exit(&mpt->m_mutex);
		if (ddi_copyout((void *)(&pci_info), (void *)data,
		    sizeof (mptsas_pci_info_t), mode) != 0) {
			status = EFAULT;
		}
		break;
	case MPTIOCTL_RESET_ADAPTER:
		mutex_enter(&mpt->m_mutex);
		/* Force a full hard reset rather than a message unit reset */
		mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
			    "failed");
			status = EFAULT;
		}
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_DIAG_ACTION:
		/*
		 * The user has done a diag buffer action.  Call our
		 * routine which does this.  Only allow one diag action
		 * at one time.  (Note this returns EBUSY directly, which
		 * is equivalent to "goto out" since the out label only
		 * returns status.)
		 */
		mutex_enter(&mpt->m_mutex);
		if (mpt->m_diag_action_in_progress) {
			mutex_exit(&mpt->m_mutex);
			return (EBUSY);
		}
		mpt->m_diag_action_in_progress = 1;
		status = mptsas_diag_action(mpt,
		    (mptsas_diag_action_t *)data, mode);
		mpt->m_diag_action_in_progress = 0;
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_EVENT_QUERY:
		/*
		 * The user has done an event query. Call our routine
		 * which does this.
		 */
		status = mptsas_event_query(mpt,
		    (mptsas_event_query_t *)data, mode, rval);
		break;
	case MPTIOCTL_EVENT_ENABLE:
		/*
		 * The user has done an event enable. Call our routine
		 * which does this.
		 */
		status = mptsas_event_enable(mpt,
		    (mptsas_event_enable_t *)data, mode, rval);
		break;
	case MPTIOCTL_EVENT_REPORT:
		/*
		 * The user has done an event report. Call our routine
		 * which does this.
		 */
		status = mptsas_event_report(mpt,
		    (mptsas_event_report_t *)data, mode, rval);
		break;
	case MPTIOCTL_REG_ACCESS:
		/*
		 * The user has requested register access.  Call our
		 * routine which does this.
		 */
		status = mptsas_reg_access(mpt,
		    (mptsas_reg_access_t *)data, mode);
		break;
	default:
		/* Unknown commands fall through to the SCSA framework. */
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
		    rval);
		break;
	}

out:
	return (status);
}
11568
/*
 * Restart the IOC: quiesce all targets, flush every outstanding and
 * queued command, re-initialize the chip, and then resume I/O.  Called
 * with m_mutex held (after firmware update, reset-adapter ioctl, or
 * fatal-error recovery).
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if mptsas_init_chip() failed (in
 * which case an FM ereport is posted and service is marked lost).
 * Note that throttles are restored and queues restarted on the failure
 * path too, before the failure is reported.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int rval = DDI_SUCCESS;
	mptsas_target_t *ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD
	 */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.  (FALSE = not first-time init, so a
	 * message unit reset may be attempted before a hard reset.)
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles
	 */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* Drain completions and restart queued I/O before reporting. */
	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
11655
11656 static int
11657 mptsas_init_chip(mptsas_t *mpt, int first_time)
11658 {
11659 ddi_dma_cookie_t cookie;
11660 uint32_t i;
11661 int rval;
11662
11663 /*
11664 * Check to see if the firmware image is valid
11665 */
11666 if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
11667 MPI2_DIAG_FLASH_BAD_SIG) {
11668 mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
11669 goto fail;
11670 }
11671
11672 /*
11673 * Reset the chip
11674 */
11675 rval = mptsas_ioc_reset(mpt, first_time);
11676 if (rval == MPTSAS_RESET_FAIL) {
11677 mptsas_log(mpt, CE_WARN, "hard reset failed!");
11678 goto fail;
11679 }
11680
11681 if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
11682 goto mur;
11683 }
11684 /*
11685 * Setup configuration space
11686 */
11687 if (mptsas_config_space_init(mpt) == FALSE) {
11688 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
11689 "failed!");
11690 goto fail;
11691 }
11692
11693 /*
11694 * IOC facts can change after a diag reset so all buffers that are
11695 * based on these numbers must be de-allocated and re-allocated. Get
11696 * new IOC facts each time chip is initialized.
11697 */
11698 if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
11699 mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
11700 goto fail;
11701 }
11702
11703 if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
11704 goto fail;
11705 }
11706 /*
11707 * Allocate request message frames, reply free queue, reply descriptor
11708 * post queue, and reply message frames using latest IOC facts.
11709 */
11710 if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
11711 mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
11712 goto fail;
11713 }
11714 if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
11715 mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
11716 goto fail;
11717 }
11718 if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
11719 mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
11720 goto fail;
11721 }
11722 if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
11723 mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
11724 goto fail;
11725 }
11726
11727 mur:
11728 /*
11729 * Re-Initialize ioc to operational state
11730 */
11731 if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
11732 mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
11733 goto fail;
11734 }
11735
11736 mptsas_alloc_reply_args(mpt);
11737
11738 /*
11739 * Initialize reply post index. Reply free index is initialized after
11740 * the next loop.
11741 */
11742 mpt->m_post_index = 0;
11743
11744 /*
11745 * Initialize the Reply Free Queue with the physical addresses of our
11746 * reply frames.
11747 */
11748 cookie.dmac_address = mpt->m_reply_frame_dma_addr;
11749 for (i = 0; i < mpt->m_max_replies; i++) {
11750 ddi_put32(mpt->m_acc_free_queue_hdl,
11751 &((uint32_t *)(void *)mpt->m_free_queue)[i],
11752 cookie.dmac_address);
11753 cookie.dmac_address += mpt->m_reply_frame_size;
11754 }
11755 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11756 DDI_DMA_SYNC_FORDEV);
11757
11758 /*
11759 * Initialize the reply free index to one past the last frame on the
11760 * queue. This will signify that the queue is empty to start with.
11761 */
11762 mpt->m_free_index = i;
11763 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);
11764
11765 /*
11766 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
11767 */
11768 for (i = 0; i < mpt->m_post_queue_depth; i++) {
11769 ddi_put64(mpt->m_acc_post_queue_hdl,
11770 &((uint64_t *)(void *)mpt->m_post_queue)[i],
11771 0xFFFFFFFFFFFFFFFF);
11772 }
11773 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
11774 DDI_DMA_SYNC_FORDEV);
11775
11776 /*
11777 * Enable ports
11778 */
11779 if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
11780 mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
11781 goto fail;
11782 }
11783
11784 /*
11785 * enable events
11786 */
11787 if (mptsas_ioc_enable_event_notification(mpt)) {
11788 goto fail;
11789 }
11790
11791 /*
11792 * We need checks in attach and these.
11793 * chip_init is called in mult. places
11794 */
11795
11796 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
11797 DDI_SUCCESS) ||
11798 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
11799 DDI_SUCCESS) ||
11800 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
11801 DDI_SUCCESS) ||
11802 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
11803 DDI_SUCCESS) ||
11804 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
11805 DDI_SUCCESS)) {
11806 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
11807 goto fail;
11808 }
11809
11810 /* Check all acc handles */
11811 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
11812 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
11813 DDI_SUCCESS) ||
11814 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
11815 DDI_SUCCESS) ||
11816 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
11817 DDI_SUCCESS) ||
11818 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
11819 DDI_SUCCESS) ||
11820 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
11821 DDI_SUCCESS) ||
11822 (mptsas_check_acc_handle(mpt->m_config_handle) !=
11823 DDI_SUCCESS)) {
11824 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
11825 goto fail;
11826 }
11827
11828 return (DDI_SUCCESS);
11829
11830 fail:
11831 return (DDI_FAILURE);
11832 }
11833
11834 static int
11835 mptsas_get_pci_cap(mptsas_t *mpt)
11836 {
11837 ushort_t caps_ptr, cap, cap_count;
11838
11839 if (mpt->m_config_handle == NULL)
11840 return (FALSE);
11841 /*
11842 * Check if capabilities list is supported and if so,
11843 * get initial capabilities pointer and clear bits 0,1.
11844 */
11845 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
11846 & PCI_STAT_CAP) {
11847 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
11848 PCI_CONF_CAP_PTR), 4);
11849 } else {
11850 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
11851 }
11852
11853 /*
11854 * Walk capabilities if supported.
11855 */
11856 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
11857
11858 /*
11859 * Check that we haven't exceeded the maximum number of
11860 * capabilities and that the pointer is in a valid range.
11861 */
11862 if (++cap_count > 48) {
11863 mptsas_log(mpt, CE_WARN,
11864 "too many device capabilities.\n");
11865 break;
11866 }
11867 if (caps_ptr < 64) {
11868 mptsas_log(mpt, CE_WARN,
11869 "capabilities pointer 0x%x out of range.\n",
11870 caps_ptr);
11871 break;
11872 }
11873
11874 /*
11875 * Get next capability and check that it is valid.
11876 * For now, we only support power management.
11877 */
11878 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
11879 switch (cap) {
11880 case PCI_CAP_ID_PM:
11881 mptsas_log(mpt, CE_NOTE,
11882 "?mptsas%d supports power management.\n",
11883 mpt->m_instance);
11884 mpt->m_options |= MPTSAS_OPT_PM;
11885
11886 /* Save PMCSR offset */
11887 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
11888 break;
11889 /*
11890 * The following capabilities are valid. Any others
11891 * will cause a message to be logged.
11892 */
11893 case PCI_CAP_ID_VPD:
11894 case PCI_CAP_ID_MSI:
11895 case PCI_CAP_ID_PCIX:
11896 case PCI_CAP_ID_PCI_E:
11897 case PCI_CAP_ID_MSI_X:
11898 break;
11899 default:
11900 mptsas_log(mpt, CE_NOTE,
11901 "?mptsas%d unrecognized capability "
11902 "0x%x.\n", mpt->m_instance, cap);
11903 break;
11904 }
11905
11906 /*
11907 * Get next capabilities pointer and clear bits 0,1.
11908 */
11909 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
11910 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
11911 }
11912 return (TRUE);
11913 }
11914
11915 static int
11916 mptsas_init_pm(mptsas_t *mpt)
11917 {
11918 char pmc_name[16];
11919 char *pmc[] = {
11920 NULL,
11921 "0=Off (PCI D3 State)",
11922 "3=On (PCI D0 State)",
11923 NULL
11924 };
11925 uint16_t pmcsr_stat;
11926
11927 if (mptsas_get_pci_cap(mpt) == FALSE) {
11928 return (DDI_FAILURE);
11929 }
11930 /*
11931 * If PCI's capability does not support PM, then don't need
11932 * to registe the pm-components
11933 */
11934 if (!(mpt->m_options & MPTSAS_OPT_PM))
11935 return (DDI_SUCCESS);
11936 /*
11937 * If power management is supported by this chip, create
11938 * pm-components property for the power management framework
11939 */
11940 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
11941 pmc[0] = pmc_name;
11942 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
11943 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
11944 mpt->m_options &= ~MPTSAS_OPT_PM;
11945 mptsas_log(mpt, CE_WARN,
11946 "mptsas%d: pm-component property creation failed.",
11947 mpt->m_instance);
11948 return (DDI_FAILURE);
11949 }
11950
11951 /*
11952 * Power on device.
11953 */
11954 (void) pm_busy_component(mpt->m_dip, 0);
11955 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
11956 mpt->m_pmcsr_offset);
11957 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
11958 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
11959 mpt->m_instance);
11960 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
11961 PCI_PMCSR_D0);
11962 }
11963 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
11964 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
11965 return (DDI_FAILURE);
11966 }
11967 mpt->m_power_level = PM_LEVEL_D0;
11968 /*
11969 * Set pm idle delay.
11970 */
11971 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
11972 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
11973
11974 return (DDI_SUCCESS);
11975 }
11976
11977 static int
11978 mptsas_register_intrs(mptsas_t *mpt)
11979 {
11980 dev_info_t *dip;
11981 int intr_types;
11982
11983 dip = mpt->m_dip;
11984
11985 /* Get supported interrupt types */
11986 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
11987 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
11988 "failed\n");
11989 return (FALSE);
11990 }
11991
11992 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
11993
11994 /*
11995 * Try MSI, but fall back to FIXED
11996 */
11997 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
11998 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
11999 NDBG0(("Using MSI interrupt type"));
12000 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
12001 return (TRUE);
12002 }
12003 }
12004 if (intr_types & DDI_INTR_TYPE_FIXED) {
12005 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
12006 NDBG0(("Using FIXED interrupt type"));
12007 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
12008 return (TRUE);
12009 } else {
12010 NDBG0(("FIXED interrupt registration failed"));
12011 return (FALSE);
12012 }
12013 }
12014
12015 return (FALSE);
12016 }
12017
/*
 * Tear down and free all interrupts previously registered via
 * mptsas_register_intrs().
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
12023
12024 /*
12025 * mptsas_add_intrs:
12026 *
12027 * Register FIXED or MSI interrupts.
12028 */
12029 static int
12030 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
12031 {
12032 dev_info_t *dip = mpt->m_dip;
12033 int avail, actual, count = 0;
12034 int i, flag, ret;
12035
12036 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12037
12038 /* Get number of interrupts */
12039 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12040 if ((ret != DDI_SUCCESS) || (count <= 0)) {
12041 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12042 "ret %d count %d\n", ret, count);
12043
12044 return (DDI_FAILURE);
12045 }
12046
12047 /* Get number of available interrupts */
12048 ret = ddi_intr_get_navail(dip, intr_type, &avail);
12049 if ((ret != DDI_SUCCESS) || (avail == 0)) {
12050 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12051 "ret %d avail %d\n", ret, avail);
12052
12053 return (DDI_FAILURE);
12054 }
12055
12056 if (0 && avail < count) {
12057 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
12058 "navail() returned %d", count, avail);
12059 }
12060
12061 /* Mpt only have one interrupt routine */
12062 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12063 count = 1;
12064 }
12065
12066 /* Allocate an array of interrupt handles */
12067 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12068 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12069
12070 flag = DDI_INTR_ALLOC_NORMAL;
12071
12072 /* call ddi_intr_alloc() */
12073 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12074 count, &actual, flag);
12075
12076 if ((ret != DDI_SUCCESS) || (actual == 0)) {
12077 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
12078 ret);
12079 kmem_free(mpt->m_htable, mpt->m_intr_size);
12080 return (DDI_FAILURE);
12081 }
12082
12083 /* use interrupt count returned or abort? */
12084 if (actual < count) {
12085 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
12086 count, actual);
12087 }
12088
12089 mpt->m_intr_cnt = actual;
12090
12091 /*
12092 * Get priority for first msi, assume remaining are all the same
12093 */
12094 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
12095 &mpt->m_intr_pri)) != DDI_SUCCESS) {
12096 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
12097
12098 /* Free already allocated intr */
12099 for (i = 0; i < actual; i++) {
12100 (void) ddi_intr_free(mpt->m_htable[i]);
12101 }
12102
12103 kmem_free(mpt->m_htable, mpt->m_intr_size);
12104 return (DDI_FAILURE);
12105 }
12106
12107 /* Test for high level mutex */
12108 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
12109 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
12110 "Hi level interrupt not supported\n");
12111
12112 /* Free already allocated intr */
12113 for (i = 0; i < actual; i++) {
12114 (void) ddi_intr_free(mpt->m_htable[i]);
12115 }
12116
12117 kmem_free(mpt->m_htable, mpt->m_intr_size);
12118 return (DDI_FAILURE);
12119 }
12120
12121 /* Call ddi_intr_add_handler() */
12122 for (i = 0; i < actual; i++) {
12123 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
12124 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
12125 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
12126 "failed %d\n", ret);
12127
12128 /* Free already allocated intr */
12129 for (i = 0; i < actual; i++) {
12130 (void) ddi_intr_free(mpt->m_htable[i]);
12131 }
12132
12133 kmem_free(mpt->m_htable, mpt->m_intr_size);
12134 return (DDI_FAILURE);
12135 }
12136 }
12137
12138 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
12139 != DDI_SUCCESS) {
12140 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
12141
12142 /* Free already allocated intr */
12143 for (i = 0; i < actual; i++) {
12144 (void) ddi_intr_free(mpt->m_htable[i]);
12145 }
12146
12147 kmem_free(mpt->m_htable, mpt->m_intr_size);
12148 return (DDI_FAILURE);
12149 }
12150
12151 /*
12152 * Enable interrupts
12153 */
12154 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12155 /* Call ddi_intr_block_enable() for MSI interrupts */
12156 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
12157 } else {
12158 /* Call ddi_intr_enable for MSI or FIXED interrupts */
12159 for (i = 0; i < mpt->m_intr_cnt; i++) {
12160 (void) ddi_intr_enable(mpt->m_htable[i]);
12161 }
12162 }
12163 return (DDI_SUCCESS);
12164 }
12165
12166 /*
12167 * mptsas_rem_intrs:
12168 *
12169 * Unregister FIXED or MSI interrupts
12170 */
12171 static void
12172 mptsas_rem_intrs(mptsas_t *mpt)
12173 {
12174 int i;
12175
12176 NDBG6(("mptsas_rem_intrs"));
12177
12178 /* Disable all interrupts */
12179 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12180 /* Call ddi_intr_block_disable() */
12181 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
12182 } else {
12183 for (i = 0; i < mpt->m_intr_cnt; i++) {
12184 (void) ddi_intr_disable(mpt->m_htable[i]);
12185 }
12186 }
12187
12188 /* Call ddi_intr_remove_handler() */
12189 for (i = 0; i < mpt->m_intr_cnt; i++) {
12190 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
12191 (void) ddi_intr_free(mpt->m_htable[i]);
12192 }
12193
12194 kmem_free(mpt->m_htable, mpt->m_intr_size);
12195 }
12196
/*
 * The IO fault service error handling callback function.
 *
 * Registered with ddi_fm_handler_register() in mptsas_fm_init().
 * Posts the incoming error report and hands the framework's own
 * status straight back.
 */
/*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
12211
12212 /*
12213 * mptsas_fm_init - initialize fma capabilities and register with IO
12214 * fault services.
12215 */
12216 static void
12217 mptsas_fm_init(mptsas_t *mpt)
12218 {
12219 /*
12220 * Need to change iblock to priority for new MSI intr
12221 */
12222 ddi_iblock_cookie_t fm_ibc;
12223
12224 /* Only register with IO Fault Services if we have some capability */
12225 if (mpt->m_fm_capabilities) {
12226 /* Adjust access and dma attributes for FMA */
12227 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12228 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12229 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12230
12231 /*
12232 * Register capabilities with IO Fault Services.
12233 * mpt->m_fm_capabilities will be updated to indicate
12234 * capabilities actually supported (not requested.)
12235 */
12236 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
12237
12238 /*
12239 * Initialize pci ereport capabilities if ereport
12240 * capable (should always be.)
12241 */
12242 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12243 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12244 pci_ereport_setup(mpt->m_dip);
12245 }
12246
12247 /*
12248 * Register error callback if error callback capable.
12249 */
12250 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12251 ddi_fm_handler_register(mpt->m_dip,
12252 mptsas_fm_error_cb, (void *) mpt);
12253 }
12254 }
12255 }
12256
12257 /*
12258 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
12259 * fault services.
12260 *
12261 */
12262 static void
12263 mptsas_fm_fini(mptsas_t *mpt)
12264 {
12265 /* Only unregister FMA capabilities if registered */
12266 if (mpt->m_fm_capabilities) {
12267
12268 /*
12269 * Un-register error callback if error callback capable.
12270 */
12271
12272 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12273 ddi_fm_handler_unregister(mpt->m_dip);
12274 }
12275
12276 /*
12277 * Release any resources allocated by pci_ereport_setup()
12278 */
12279
12280 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12281 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12282 pci_ereport_teardown(mpt->m_dip);
12283 }
12284
12285 /* Unregister from IO Fault Services */
12286 ddi_fm_fini(mpt->m_dip);
12287
12288 /* Adjust access and dma attributes for FMA */
12289 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
12290 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12291 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12292
12293 }
12294 }
12295
/*
 * Query the FM error status of a register/config access handle.
 * Returns DDI_FAILURE for a NULL handle, otherwise the handle's
 * fme_status as reported by ddi_fm_acc_err_get().
 */
int
mptsas_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL)
		return (DDI_FAILURE);
	ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
	return (de.fme_status);
}
12306
/*
 * Query the FM error status of a DMA handle.  Returns DDI_FAILURE for
 * a NULL handle, otherwise the handle's fme_status as reported by
 * ddi_fm_dma_err_get().
 */
int
mptsas_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL)
		return (DDI_FAILURE);
	ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
	return (de.fme_status);
}
12317
12318 void
12319 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
12320 {
12321 uint64_t ena;
12322 char buf[FM_MAX_CLASS];
12323
12324 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12325 ena = fm_ena_generate(0, FM_ENA_FMT1);
12326 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
12327 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
12328 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12329 }
12330 }
12331
12332 static int
12333 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
12334 uint16_t *dev_handle, mptsas_target_t **pptgt)
12335 {
12336 int rval;
12337 uint32_t dev_info;
12338 uint64_t sas_wwn;
12339 mptsas_phymask_t phymask;
12340 uint8_t physport, phynum, config, disk;
12341 mptsas_slots_t *slots = mpt->m_active;
12342 uint64_t devicename;
12343 uint16_t pdev_hdl;
12344 mptsas_target_t *tmp_tgt = NULL;
12345 uint16_t bay_num, enclosure;
12346
12347 ASSERT(*pptgt == NULL);
12348
12349 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
12350 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
12351 &bay_num, &enclosure);
12352 if (rval != DDI_SUCCESS) {
12353 rval = DEV_INFO_FAIL_PAGE0;
12354 return (rval);
12355 }
12356
12357 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
12358 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12359 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
12360 rval = DEV_INFO_WRONG_DEVICE_TYPE;
12361 return (rval);
12362 }
12363
12364 /*
12365 * Check if the dev handle is for a Phys Disk. If so, set return value
12366 * and exit. Don't add Phys Disks to hash.
12367 */
12368 for (config = 0; config < slots->m_num_raid_configs; config++) {
12369 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
12370 if (*dev_handle == slots->m_raidconfig[config].
12371 m_physdisk_devhdl[disk]) {
12372 rval = DEV_INFO_PHYS_DISK;
12373 return (rval);
12374 }
12375 }
12376 }
12377
12378 /*
12379 * Get SATA Device Name from SAS device page0 for
12380 * sata device, if device name doesn't exist, set m_sas_wwn to
12381 * 0 for direct attached SATA. For the device behind the expander
12382 * we still can use STP address assigned by expander.
12383 */
12384 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12385 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12386 mutex_exit(&mpt->m_mutex);
12387 /* alloc a tmp_tgt to send the cmd */
12388 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
12389 KM_SLEEP);
12390 tmp_tgt->m_devhdl = *dev_handle;
12391 tmp_tgt->m_deviceinfo = dev_info;
12392 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
12393 tmp_tgt->m_qfull_retry_interval =
12394 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
12395 tmp_tgt->m_t_throttle = MAX_THROTTLE;
12396 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
12397 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
12398 mutex_enter(&mpt->m_mutex);
12399 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
12400 sas_wwn = devicename;
12401 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
12402 sas_wwn = 0;
12403 }
12404 }
12405
12406 phymask = mptsas_physport_to_phymask(mpt, physport);
12407 *pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
12408 dev_info, phymask, phynum);
12409 if (*pptgt == NULL) {
12410 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
12411 "structure!");
12412 rval = DEV_INFO_FAIL_ALLOC;
12413 return (rval);
12414 }
12415 (*pptgt)->m_enclosure = enclosure;
12416 (*pptgt)->m_slot_num = bay_num;
12417 return (DEV_INFO_SUCCESS);
12418 }
12419
12420 uint64_t
12421 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
12422 {
12423 uint64_t sata_guid = 0, *pwwn = NULL;
12424 int target = ptgt->m_devhdl;
12425 uchar_t *inq83 = NULL;
12426 int inq83_len = 0xFF;
12427 uchar_t *dblk = NULL;
12428 int inq83_retry = 3;
12429 int rval = DDI_FAILURE;
12430
12431 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
12432
12433 inq83_retry:
12434 rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
12435 inq83_len, NULL, 1);
12436 if (rval != DDI_SUCCESS) {
12437 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
12438 "0x83 for target:%x, lun:%x failed!", target, lun);
12439 goto out;
12440 }
12441 /* According to SAT2, the first descriptor is logic unit name */
12442 dblk = &inq83[4];
12443 if ((dblk[1] & 0x30) != 0) {
12444 mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
12445 goto out;
12446 }
12447 pwwn = (uint64_t *)(void *)(&dblk[4]);
12448 if ((dblk[4] & 0xf0) == 0x50) {
12449 sata_guid = BE_64(*pwwn);
12450 goto out;
12451 } else if (dblk[4] == 'A') {
12452 NDBG20(("SATA drive has no NAA format GUID."));
12453 goto out;
12454 } else {
12455 /* The data is not ready, wait and retry */
12456 inq83_retry--;
12457 if (inq83_retry <= 0) {
12458 goto out;
12459 }
12460 NDBG20(("The GUID is not ready, retry..."));
12461 delay(1 * drv_usectohz(1000000));
12462 goto inq83_retry;
12463 }
12464 out:
12465 kmem_free(inq83, inq83_len);
12466 return (sata_guid);
12467 }
12468
12469 static int
12470 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
12471 unsigned char *buf, int len, int *reallen, uchar_t evpd)
12472 {
12473 uchar_t cdb[CDB_GROUP0];
12474 struct scsi_address ap;
12475 struct buf *data_bp = NULL;
12476 int resid = 0;
12477 int ret = DDI_FAILURE;
12478
12479 ASSERT(len <= 0xffff);
12480
12481 ap.a_target = MPTSAS_INVALID_DEVHDL;
12482 ap.a_lun = (uchar_t)(lun);
12483 ap.a_hba_tran = mpt->m_tran;
12484
12485 data_bp = scsi_alloc_consistent_buf(&ap,
12486 (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
12487 if (data_bp == NULL) {
12488 return (ret);
12489 }
12490 bzero(cdb, CDB_GROUP0);
12491 cdb[0] = SCMD_INQUIRY;
12492 cdb[1] = evpd;
12493 cdb[2] = page;
12494 cdb[3] = (len & 0xff00) >> 8;
12495 cdb[4] = (len & 0x00ff);
12496 cdb[5] = 0;
12497
12498 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
12499 &resid);
12500 if (ret == DDI_SUCCESS) {
12501 if (reallen) {
12502 *reallen = len - resid;
12503 }
12504 bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
12505 }
12506 if (data_bp) {
12507 scsi_free_consistent_buf(data_bp);
12508 }
12509 return (ret);
12510 }
12511
/*
 * Send a polled SCSI command to a target on behalf of the driver
 * itself (e.g. the internal INQUIRY issued by mptsas_inquiry()).
 *
 * A private copy of the HBA tran structure is built so that a
 * mptsas_tgt_private_t carrying the lun and target pointer can be
 * passed down through scsi_init_pkt(), simulating a command issued by
 * sd.  The packet is run synchronously via scsi_poll(); on success
 * the residual byte count is returned through resid (when non-NULL).
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE on allocation, transport, or
 * check-condition failure.  All resources are released on every path.
 */
static int
mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
    mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
    int *resid)
{
	struct scsi_pkt *pktp = NULL;
	scsi_hba_tran_t *tran_clone = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	int ret = DDI_FAILURE;

	/*
	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
	 * to simulate the cmds from sd
	 */
	tran_clone = kmem_alloc(
	    sizeof (scsi_hba_tran_t), KM_SLEEP);
	/* Defensive check: KM_SLEEP allocations do not return NULL. */
	if (tran_clone == NULL) {
		goto out;
	}
	bcopy((caddr_t)mpt->m_tran,
	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
	tgt_private = kmem_alloc(
	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
	if (tgt_private == NULL) {
		goto out;
	}
	/* Hang the lun/target off the cloned tran for scsi_init_pkt(). */
	tgt_private->t_lun = ap->a_lun;
	tgt_private->t_private = ptgt;
	tran_clone->tran_tgt_private = tgt_private;
	ap->a_hba_tran = tran_clone;

	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
	    data_bp, cdblen, sizeof (struct scsi_arq_status),
	    0, PKT_CONSISTENT, NULL, NULL);
	if (pktp == NULL) {
		goto out;
	}
	bcopy(cdb, pktp->pkt_cdbp, cdblen);
	pktp->pkt_flags = FLAG_NOPARITY;
	/* Run the command synchronously. */
	if (scsi_poll(pktp) < 0) {
		goto out;
	}
	/* A CHECK CONDITION status is treated as failure. */
	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
		goto out;
	}
	if (resid != NULL) {
		*resid = pktp->pkt_resid;
	}

	ret = DDI_SUCCESS;
out:
	/* Common cleanup for every exit path. */
	if (pktp) {
		scsi_destroy_pkt(pktp);
	}
	if (tran_clone) {
		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
	}
	if (tgt_private) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
	}
	return (ret);
}
/*
 * Parse a unit-address string of the form "w<WWID>,<LUN>" or
 * "p<PHYID>,<LUN>".  The leading character selects the addressing
 * style: 'w' decodes a WWN string into *wwid, 'p' decodes a hex phy
 * id into *phy.  The (hex) lun is always returned through *lun.
 * Returns DDI_SUCCESS on a fully parsed address, DDI_FAILURE
 * otherwise.
 */
static int
mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
{
	char *cp = NULL;
	char *ptr = NULL;
	size_t s = 0;
	char *wwid_str = NULL;
	char *lun_str = NULL;
	long lunnum;
	long phyid = -1;
	int rc = DDI_FAILURE;

	ptr = name;
	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
	ptr++;
	/* The address part and the lun part are separated by a comma. */
	if ((cp = strchr(ptr, ',')) == NULL) {
		return (DDI_FAILURE);
	}

	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	/* Copy out the wwid/phy portion (up to, not including, ','). */
	bcopy(ptr, wwid_str, s);
	wwid_str[s] = '\0';

	ptr = ++cp;

	if ((cp = strchr(ptr, '\0')) == NULL) {
		goto out;
	}
	lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	/* Copy out the lun portion (remainder of the string). */
	bcopy(ptr, lun_str, s);
	lun_str[s] = '\0';

	if (name[0] == 'p') {
		/* "p" form: the first field is a hex phy identifier. */
		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
	} else {
		/* "w" form: the first field is a WWN string. */
		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
	}
	if (rc != DDI_SUCCESS)
		goto out;

	if (phyid != -1) {
		ASSERT(phyid < MPTSAS_MAX_PHYS);
		*phy = (uint8_t)phyid;
	}
	/* The lun is encoded in hex as well. */
	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
	if (rc != 0)
		goto out;

	*lun = (int)lunnum;
	rc = DDI_SUCCESS;
out:
	if (wwid_str)
		kmem_free(wwid_str, SCSI_MAXNAMELEN);
	if (lun_str)
		kmem_free(lun_str, SCSI_MAXNAMELEN);

	return (rc);
}
12637
12638 /*
12639 * mptsas_parse_smp_name() is to parse sas wwn string
12640 * which format is "wWWN"
12641 */
12642 static int
12643 mptsas_parse_smp_name(char *name, uint64_t *wwn)
12644 {
12645 char *ptr = name;
12646
12647 if (*ptr != 'w') {
12648 return (DDI_FAILURE);
12649 }
12650
12651 ptr++;
12652 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
12653 return (DDI_FAILURE);
12654 }
12655 return (DDI_SUCCESS);
12656 }
12657
/*
 * Bus config entry point for the iport node.
 *
 * BUS_CONFIG_ONE decodes the child name ("smp@wWWN", "...@wWWID,LUN"
 * or "...@pPHY,LUN") and configures just that device;
 * BUS_CONFIG_DRIVER and BUS_CONFIG_ALL enumerate everything on the
 * port.  The vHCI and the iport are both entered to hold the nexus
 * stable across the operation.  Returns an NDI_* status.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int ret = NDI_FAILURE;
	int circ = 0;
	int circ1 = 0;
	mptsas_t *mpt;
	char *ptr = NULL;
	char *devnm = NULL;
	uint64_t wwid = 0;
	uint8_t phy = 0xFF;
	int lun = 0;
	uint_t mflags = flag;
	int bconfig = TRUE;

	/* Only iport nodes (those with a unit address) are configured. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	/* Let the framework finish the config (unless skipped above). */
	if ((ret == NDI_SUCCESS) && bconfig) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
12775
12776 static int
12777 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
12778 mptsas_target_t *ptgt)
12779 {
12780 int rval = DDI_FAILURE;
12781 struct scsi_inquiry *sd_inq = NULL;
12782 mptsas_t *mpt = DIP2MPT(pdip);
12783
12784 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
12785
12786 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
12787 SUN_INQSIZE, 0, (uchar_t)0);
12788
12789 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
12790 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
12791 } else {
12792 rval = DDI_FAILURE;
12793 }
12794
12795 kmem_free(sd_inq, SUN_INQSIZE);
12796 return (rval);
12797 }
12798
/*
 * Configure the lun of the device with the given SAS address under
 * the iport pdip.  An already-configured child is returned directly;
 * a phymask of 0 identifies the IR (RAID volume) iport, in which case
 * the volume is configured instead of a physical lun.  Returns
 * DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int rval;
	mptsas_t *mpt = DIP2MPT(pdip);
	int phymask;
	mptsas_target_t *ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		if (sasaddr != ptgt->m_sas_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
12859
12860 static int
12861 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
12862 dev_info_t **lundip)
12863 {
12864 int rval;
12865 mptsas_t *mpt = DIP2MPT(pdip);
12866 int phymask;
12867 mptsas_target_t *ptgt = NULL;
12868
12869 /*
12870 * Get the physical port associated to the iport
12871 */
12872 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
12873 "phymask", 0);
12874
12875 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
12876 if (ptgt == NULL) {
12877 /*
12878 * didn't match any device by searching
12879 */
12880 return (DDI_FAILURE);
12881 }
12882
12883 /*
12884 * If the LUN already exists and the status is online,
12885 * we just return the pointer to dev_info_t directly.
12886 * For the mdi_pathinfo node, we'll handle it in
12887 * mptsas_create_virt_lun().
12888 */
12889
12890 *lundip = mptsas_find_child_phy(pdip, phy);
12891 if (*lundip != NULL) {
12892 return (DDI_SUCCESS);
12893 }
12894
12895 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
12896
12897 return (rval);
12898 }
12899
12900 static int
12901 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
12902 uint8_t *lun_addr_type)
12903 {
12904 uint32_t lun_idx = 0;
12905
12906 ASSERT(lun_num != NULL);
12907 ASSERT(lun_addr_type != NULL);
12908
12909 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
12910 /* determine report luns addressing type */
12911 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
12912 /*
12913 * Vendors in the field have been found to be concatenating
12914 * bus/target/lun to equal the complete lun value instead
12915 * of switching to flat space addressing
12916 */
12917 /* 00b - peripheral device addressing method */
12918 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
12919 /* FALLTHRU */
12920 /* 10b - logical unit addressing method */
12921 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
12922 /* FALLTHRU */
12923 /* 01b - flat space addressing method */
12924 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
12925 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
12926 *lun_addr_type = (buf[lun_idx] &
12927 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
12928 *lun_num = (buf[lun_idx] & 0x3F) << 8;
12929 *lun_num |= buf[lun_idx + 1];
12930 return (DDI_SUCCESS);
12931 default:
12932 return (DDI_FAILURE);
12933 }
12934 }
12935
12936 static int
12937 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
12938 {
12939 struct buf *repluns_bp = NULL;
12940 struct scsi_address ap;
12941 uchar_t cdb[CDB_GROUP5];
12942 int ret = DDI_FAILURE;
12943 int retry = 0;
12944 int lun_list_len = 0;
12945 uint16_t lun_num = 0;
12946 uint8_t lun_addr_type = 0;
12947 uint32_t lun_cnt = 0;
12948 uint32_t lun_total = 0;
12949 dev_info_t *cdip = NULL;
12950 uint16_t *saved_repluns = NULL;
12951 char *buffer = NULL;
12952 int buf_len = 128;
12953 mptsas_t *mpt = DIP2MPT(pdip);
12954 uint64_t sas_wwn = 0;
12955 uint8_t phy = 0xFF;
12956 uint32_t dev_info = 0;
12957
12958 mutex_enter(&mpt->m_mutex);
12959 sas_wwn = ptgt->m_sas_wwn;
12960 phy = ptgt->m_phynum;
12961 dev_info = ptgt->m_deviceinfo;
12962 mutex_exit(&mpt->m_mutex);
12963
12964 if (sas_wwn == 0) {
12965 /*
12966 * It's a SATA without Device Name
12967 * So don't try multi-LUNs
12968 */
12969 if (mptsas_find_child_phy(pdip, phy)) {
12970 return (DDI_SUCCESS);
12971 } else {
12972 /*
12973 * need configure and create node
12974 */
12975 return (DDI_FAILURE);
12976 }
12977 }
12978
12979 /*
12980 * WWN (SAS address or Device Name exist)
12981 */
12982 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
12983 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
12984 /*
12985 * SATA device with Device Name
12986 * So don't try multi-LUNs
12987 */
12988 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
12989 return (DDI_SUCCESS);
12990 } else {
12991 return (DDI_FAILURE);
12992 }
12993 }
12994
12995 do {
12996 ap.a_target = MPTSAS_INVALID_DEVHDL;
12997 ap.a_lun = 0;
12998 ap.a_hba_tran = mpt->m_tran;
12999 repluns_bp = scsi_alloc_consistent_buf(&ap,
13000 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
13001 if (repluns_bp == NULL) {
13002 retry++;
13003 continue;
13004 }
13005 bzero(cdb, CDB_GROUP5);
13006 cdb[0] = SCMD_REPORT_LUNS;
13007 cdb[6] = (buf_len & 0xff000000) >> 24;
13008 cdb[7] = (buf_len & 0x00ff0000) >> 16;
13009 cdb[8] = (buf_len & 0x0000ff00) >> 8;
13010 cdb[9] = (buf_len & 0x000000ff);
13011
13012 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
13013 repluns_bp, NULL);
13014 if (ret != DDI_SUCCESS) {
13015 scsi_free_consistent_buf(repluns_bp);
13016 retry++;
13017 continue;
13018 }
13019 lun_list_len = BE_32(*(int *)((void *)(
13020 repluns_bp->b_un.b_addr)));
13021 if (buf_len >= lun_list_len + 8) {
13022 ret = DDI_SUCCESS;
13023 break;
13024 }
13025 scsi_free_consistent_buf(repluns_bp);
13026 buf_len = lun_list_len + 8;
13027
13028 } while (retry < 3);
13029
13030 if (ret != DDI_SUCCESS)
13031 return (ret);
13032 buffer = (char *)repluns_bp->b_un.b_addr;
13033 /*
13034 * find out the number of luns returned by the SCSI ReportLun call
13035 * and allocate buffer space
13036 */
13037 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13038 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
13039 if (saved_repluns == NULL) {
13040 scsi_free_consistent_buf(repluns_bp);
13041 return (DDI_FAILURE);
13042 }
13043 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
13044 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
13045 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
13046 continue;
13047 }
13048 saved_repluns[lun_cnt] = lun_num;
13049 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
13050 ret = DDI_SUCCESS;
13051 else
13052 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
13053 ptgt);
13054 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
13055 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
13056 MPTSAS_DEV_GONE);
13057 }
13058 }
13059 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
13060 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
13061 scsi_free_consistent_buf(repluns_bp);
13062 return (DDI_SUCCESS);
13063 }
13064
13065 static int
13066 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
13067 {
13068 int rval = DDI_FAILURE;
13069 struct scsi_inquiry *sd_inq = NULL;
13070 mptsas_t *mpt = DIP2MPT(pdip);
13071 mptsas_target_t *ptgt = NULL;
13072
13073 mutex_enter(&mpt->m_mutex);
13074 ptgt = mptsas_search_by_devhdl(&mpt->m_active->m_tgttbl, target);
13075 mutex_exit(&mpt->m_mutex);
13076 if (ptgt == NULL) {
13077 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
13078 "not found.", target);
13079 return (rval);
13080 }
13081
13082 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13083 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
13084 SUN_INQSIZE, 0, (uchar_t)0);
13085
13086 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13087 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
13088 0);
13089 } else {
13090 rval = DDI_FAILURE;
13091 }
13092
13093 kmem_free(sd_inq, SUN_INQSIZE);
13094 return (rval);
13095 }
13096
13097 /*
13098 * configure all RAID volumes for virtual iport
13099 */
13100 static void
13101 mptsas_config_all_viport(dev_info_t *pdip)
13102 {
13103 mptsas_t *mpt = DIP2MPT(pdip);
13104 int config, vol;
13105 int target;
13106 dev_info_t *lundip = NULL;
13107 mptsas_slots_t *slots = mpt->m_active;
13108
13109 /*
13110 * Get latest RAID info and search for any Volume DevHandles. If any
13111 * are found, configure the volume.
13112 */
13113 mutex_enter(&mpt->m_mutex);
13114 for (config = 0; config < slots->m_num_raid_configs; config++) {
13115 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
13116 if (slots->m_raidconfig[config].m_raidvol[vol].m_israid
13117 == 1) {
13118 target = slots->m_raidconfig[config].
13119 m_raidvol[vol].m_raidhandle;
13120 mutex_exit(&mpt->m_mutex);
13121 (void) mptsas_config_raid(pdip, target,
13122 &lundip);
13123 mutex_enter(&mpt->m_mutex);
13124 }
13125 }
13126 }
13127 mutex_exit(&mpt->m_mutex);
13128 }
13129
13130 static void
13131 mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
13132 int lun_cnt, mptsas_target_t *ptgt)
13133 {
13134 dev_info_t *child = NULL, *savechild = NULL;
13135 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
13136 uint64_t sas_wwn, wwid;
13137 uint8_t phy;
13138 int lun;
13139 int i;
13140 int find;
13141 char *addr;
13142 char *nodename;
13143 mptsas_t *mpt = DIP2MPT(pdip);
13144
13145 mutex_enter(&mpt->m_mutex);
13146 wwid = ptgt->m_sas_wwn;
13147 mutex_exit(&mpt->m_mutex);
13148
13149 child = ddi_get_child(pdip);
13150 while (child) {
13151 find = 0;
13152 savechild = child;
13153 child = ddi_get_next_sibling(child);
13154
13155 nodename = ddi_node_name(savechild);
13156 if (strcmp(nodename, "smp") == 0) {
13157 continue;
13158 }
13159
13160 addr = ddi_get_name_addr(savechild);
13161 if (addr == NULL) {
13162 continue;
13163 }
13164
13165 if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
13166 DDI_SUCCESS) {
13167 continue;
13168 }
13169
13170 if (wwid == sas_wwn) {
13171 for (i = 0; i < lun_cnt; i++) {
13172 if (repluns[i] == lun) {
13173 find = 1;
13174 break;
13175 }
13176 }
13177 } else {
13178 continue;
13179 }
13180 if (find == 0) {
13181 /*
13182 * The lun has not been there already
13183 */
13184 (void) mptsas_offline_lun(pdip, savechild, NULL,
13185 NDI_DEVI_REMOVE);
13186 }
13187 }
13188
13189 pip = mdi_get_next_client_path(pdip, NULL);
13190 while (pip) {
13191 find = 0;
13192 savepip = pip;
13193 addr = MDI_PI(pip)->pi_addr;
13194
13195 pip = mdi_get_next_client_path(pdip, pip);
13196
13197 if (addr == NULL) {
13198 continue;
13199 }
13200
13201 if (mptsas_parse_address(addr, &sas_wwn, &phy,
13202 &lun) != DDI_SUCCESS) {
13203 continue;
13204 }
13205
13206 if (sas_wwn == wwid) {
13207 for (i = 0; i < lun_cnt; i++) {
13208 if (repluns[i] == lun) {
13209 find = 1;
13210 break;
13211 }
13212 }
13213 } else {
13214 continue;
13215 }
13216
13217 if (find == 0) {
13218 /*
13219 * The lun has not been there already
13220 */
13221 (void) mptsas_offline_lun(pdip, NULL, savepip,
13222 NDI_DEVI_REMOVE);
13223 }
13224 }
13225 }
13226
13227 void
13228 mptsas_update_hashtab(struct mptsas *mpt)
13229 {
13230 uint32_t page_address;
13231 int rval = 0;
13232 uint16_t dev_handle;
13233 mptsas_target_t *ptgt = NULL;
13234 mptsas_smp_t smp_node;
13235
13236 /*
13237 * Get latest RAID info.
13238 */
13239 (void) mptsas_get_raid_info(mpt);
13240
13241 dev_handle = mpt->m_smp_devhdl;
13242 for (; mpt->m_done_traverse_smp == 0; ) {
13243 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
13244 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
13245 if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
13246 != DDI_SUCCESS) {
13247 break;
13248 }
13249 mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
13250 (void) mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
13251 }
13252
13253 /*
13254 * Config target devices
13255 */
13256 dev_handle = mpt->m_dev_handle;
13257
13258 /*
13259 * Do loop to get sas device page 0 by GetNextHandle till the
13260 * the last handle. If the sas device is a SATA/SSP target,
13261 * we try to config it.
13262 */
13263 for (; mpt->m_done_traverse_dev == 0; ) {
13264 ptgt = NULL;
13265 page_address =
13266 (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
13267 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
13268 (uint32_t)dev_handle;
13269 rval = mptsas_get_target_device_info(mpt, page_address,
13270 &dev_handle, &ptgt);
13271 if ((rval == DEV_INFO_FAIL_PAGE0) ||
13272 (rval == DEV_INFO_FAIL_ALLOC)) {
13273 break;
13274 }
13275
13276 mpt->m_dev_handle = dev_handle;
13277 }
13278
13279 }
13280
13281 void
13282 mptsas_invalid_hashtab(mptsas_hash_table_t *hashtab)
13283 {
13284 mptsas_hash_data_t *data;
13285 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
13286 while (data != NULL) {
13287 data->devhdl = MPTSAS_INVALID_DEVHDL;
13288 data->device_info = 0;
13289 /*
13290 * For tgttbl, clear dr_flag.
13291 */
13292 data->dr_flag = MPTSAS_DR_INACTIVE;
13293 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
13294 }
13295 }
13296
/*
 * Rebuild the driver's view of the topology after a hard reset:
 *  1. refresh the port/phymask mapping table (mpt->m_phy_info);
 *  2. invalidate every target and SMP hash-table entry
 *     (m_devhdl = MPTSAS_INVALID_DEVHDL, device info cleared);
 *  3. re-walk the device/expander configuration pages to refill the
 *     tables (mptsas_update_hashtab()).
 * The traversal state is reset first so the re-walk starts from scratch.
 */
void
mptsas_update_driver_data(struct mptsas *mpt)
{
	/*
	 * TODO after hard reset, update the driver data structures
	 * 1. update port/phymask mapping table mpt->m_phy_info
	 * 2. invalid all the entries in hash table
	 *    m_devhdl = 0xffff and m_deviceinfo = 0
	 * 3. call sas_device_page/expander_page to update hash table
	 */
	mptsas_update_phymask(mpt);
	/*
	 * Invalidate the existing entries before re-enumerating.
	 */
	mptsas_invalid_hashtab(&mpt->m_active->m_tgttbl);
	mptsas_invalid_hashtab(&mpt->m_active->m_smptbl);
	/* Restart both traversals from the invalid handle. */
	mpt->m_done_traverse_dev = 0;
	mpt->m_done_traverse_smp = 0;
	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
	mptsas_update_hashtab(mpt);
}
13318
13319 static void
13320 mptsas_config_all(dev_info_t *pdip)
13321 {
13322 dev_info_t *smpdip = NULL;
13323 mptsas_t *mpt = DIP2MPT(pdip);
13324 int phymask = 0;
13325 mptsas_phymask_t phy_mask;
13326 mptsas_target_t *ptgt = NULL;
13327 mptsas_smp_t *psmp;
13328
13329 /*
13330 * Get the phymask associated to the iport
13331 */
13332 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13333 "phymask", 0);
13334
13335 /*
13336 * Enumerate RAID volumes here (phymask == 0).
13337 */
13338 if (phymask == 0) {
13339 mptsas_config_all_viport(pdip);
13340 return;
13341 }
13342
13343 mutex_enter(&mpt->m_mutex);
13344
13345 if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
13346 mptsas_update_hashtab(mpt);
13347 }
13348
13349 psmp = (mptsas_smp_t *)mptsas_hash_traverse(&mpt->m_active->m_smptbl,
13350 MPTSAS_HASH_FIRST);
13351 while (psmp != NULL) {
13352 phy_mask = psmp->m_phymask;
13353 if (phy_mask == phymask) {
13354 smpdip = NULL;
13355 mutex_exit(&mpt->m_mutex);
13356 (void) mptsas_online_smp(pdip, psmp, &smpdip);
13357 mutex_enter(&mpt->m_mutex);
13358 }
13359 psmp = (mptsas_smp_t *)mptsas_hash_traverse(
13360 &mpt->m_active->m_smptbl, MPTSAS_HASH_NEXT);
13361 }
13362
13363 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
13364 MPTSAS_HASH_FIRST);
13365 while (ptgt != NULL) {
13366 phy_mask = ptgt->m_phymask;
13367 if (phy_mask == phymask) {
13368 mutex_exit(&mpt->m_mutex);
13369 (void) mptsas_config_target(pdip, ptgt);
13370 mutex_enter(&mpt->m_mutex);
13371 }
13372
13373 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
13374 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
13375 }
13376 mutex_exit(&mpt->m_mutex);
13377 }
13378
13379 static int
13380 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
13381 {
13382 int rval = DDI_FAILURE;
13383 dev_info_t *tdip;
13384
13385 rval = mptsas_config_luns(pdip, ptgt);
13386 if (rval != DDI_SUCCESS) {
13387 /*
13388 * The return value means the SCMD_REPORT_LUNS
13389 * did not execute successfully. The target maybe
13390 * doesn't support such command.
13391 */
13392 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
13393 }
13394 return (rval);
13395 }
13396
13397 /*
13398 * Return fail if not all the childs/paths are freed.
13399 * if there is any path under the HBA, the return value will be always fail
13400 * because we didn't call mdi_pi_free for path
13401 */
13402 static int
13403 mptsas_offline_target(dev_info_t *pdip, char *name)
13404 {
13405 dev_info_t *child = NULL, *prechild = NULL;
13406 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
13407 int tmp_rval, rval = DDI_SUCCESS;
13408 char *addr, *cp;
13409 size_t s;
13410 mptsas_t *mpt = DIP2MPT(pdip);
13411
13412 child = ddi_get_child(pdip);
13413 while (child) {
13414 addr = ddi_get_name_addr(child);
13415 prechild = child;
13416 child = ddi_get_next_sibling(child);
13417
13418 if (addr == NULL) {
13419 continue;
13420 }
13421 if ((cp = strchr(addr, ',')) == NULL) {
13422 continue;
13423 }
13424
13425 s = (uintptr_t)cp - (uintptr_t)addr;
13426
13427 if (strncmp(addr, name, s) != 0) {
13428 continue;
13429 }
13430
13431 tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
13432 NDI_DEVI_REMOVE);
13433 if (tmp_rval != DDI_SUCCESS) {
13434 rval = DDI_FAILURE;
13435 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
13436 prechild, MPTSAS_DEV_GONE) !=
13437 DDI_PROP_SUCCESS) {
13438 mptsas_log(mpt, CE_WARN, "mptsas driver "
13439 "unable to create property for "
13440 "SAS %s (MPTSAS_DEV_GONE)", addr);
13441 }
13442 }
13443 }
13444
13445 pip = mdi_get_next_client_path(pdip, NULL);
13446 while (pip) {
13447 addr = MDI_PI(pip)->pi_addr;
13448 savepip = pip;
13449 pip = mdi_get_next_client_path(pdip, pip);
13450 if (addr == NULL) {
13451 continue;
13452 }
13453
13454 if ((cp = strchr(addr, ',')) == NULL) {
13455 continue;
13456 }
13457
13458 s = (uintptr_t)cp - (uintptr_t)addr;
13459
13460 if (strncmp(addr, name, s) != 0) {
13461 continue;
13462 }
13463
13464 (void) mptsas_offline_lun(pdip, NULL, savepip,
13465 NDI_DEVI_REMOVE);
13466 /*
13467 * driver will not invoke mdi_pi_free, so path will not
13468 * be freed forever, return DDI_FAILURE.
13469 */
13470 rval = DDI_FAILURE;
13471 }
13472 return (rval);
13473 }
13474
13475 static int
13476 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
13477 mdi_pathinfo_t *rpip, uint_t flags)
13478 {
13479 int rval = DDI_FAILURE;
13480 char *devname;
13481 dev_info_t *cdip, *parent;
13482
13483 if (rpip != NULL) {
13484 parent = scsi_vhci_dip;
13485 cdip = mdi_pi_get_client(rpip);
13486 } else if (rdip != NULL) {
13487 parent = pdip;
13488 cdip = rdip;
13489 } else {
13490 return (DDI_FAILURE);
13491 }
13492
13493 /*
13494 * Make sure node is attached otherwise
13495 * it won't have related cache nodes to
13496 * clean up. i_ddi_devi_attached is
13497 * similiar to i_ddi_node_state(cdip) >=
13498 * DS_ATTACHED.
13499 */
13500 if (i_ddi_devi_attached(cdip)) {
13501
13502 /* Get full devname */
13503 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13504 (void) ddi_deviname(cdip, devname);
13505 /* Clean cache */
13506 (void) devfs_clean(parent, devname + 1,
13507 DV_CLEAN_FORCE);
13508 kmem_free(devname, MAXNAMELEN + 1);
13509 }
13510 if (rpip != NULL) {
13511 if (MDI_PI_IS_OFFLINE(rpip)) {
13512 rval = DDI_SUCCESS;
13513 } else {
13514 rval = mdi_pi_offline(rpip, 0);
13515 }
13516 } else {
13517 rval = ndi_devi_offline(cdip, flags);
13518 }
13519
13520 return (rval);
13521 }
13522
13523 static dev_info_t *
13524 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
13525 {
13526 dev_info_t *child = NULL;
13527 char *smp_wwn = NULL;
13528
13529 child = ddi_get_child(parent);
13530 while (child) {
13531 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
13532 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
13533 != DDI_SUCCESS) {
13534 child = ddi_get_next_sibling(child);
13535 continue;
13536 }
13537
13538 if (strcmp(smp_wwn, str_wwn) == 0) {
13539 ddi_prop_free(smp_wwn);
13540 break;
13541 }
13542 child = ddi_get_next_sibling(child);
13543 ddi_prop_free(smp_wwn);
13544 }
13545 return (child);
13546 }
13547
13548 static int
13549 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
13550 {
13551 int rval = DDI_FAILURE;
13552 char *devname;
13553 char wwn_str[MPTSAS_WWN_STRLEN];
13554 dev_info_t *cdip;
13555
13556 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
13557
13558 cdip = mptsas_find_smp_child(pdip, wwn_str);
13559
13560 if (cdip == NULL)
13561 return (DDI_SUCCESS);
13562
13563 /*
13564 * Make sure node is attached otherwise
13565 * it won't have related cache nodes to
13566 * clean up. i_ddi_devi_attached is
13567 * similiar to i_ddi_node_state(cdip) >=
13568 * DS_ATTACHED.
13569 */
13570 if (i_ddi_devi_attached(cdip)) {
13571
13572 /* Get full devname */
13573 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
13574 (void) ddi_deviname(cdip, devname);
13575 /* Clean cache */
13576 (void) devfs_clean(pdip, devname + 1,
13577 DV_CLEAN_FORCE);
13578 kmem_free(devname, MAXNAMELEN + 1);
13579 }
13580
13581 rval = ndi_devi_offline(cdip, flags);
13582
13583 return (rval);
13584 }
13585
13586 static dev_info_t *
13587 mptsas_find_child(dev_info_t *pdip, char *name)
13588 {
13589 dev_info_t *child = NULL;
13590 char *rname = NULL;
13591 int rval = DDI_FAILURE;
13592
13593 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13594
13595 child = ddi_get_child(pdip);
13596 while (child) {
13597 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
13598 if (rval != DDI_SUCCESS) {
13599 child = ddi_get_next_sibling(child);
13600 bzero(rname, SCSI_MAXNAMELEN);
13601 continue;
13602 }
13603
13604 if (strcmp(rname, name) == 0) {
13605 break;
13606 }
13607 child = ddi_get_next_sibling(child);
13608 bzero(rname, SCSI_MAXNAMELEN);
13609 }
13610
13611 kmem_free(rname, SCSI_MAXNAMELEN);
13612
13613 return (child);
13614 }
13615
13616
13617 static dev_info_t *
13618 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
13619 {
13620 dev_info_t *child = NULL;
13621 char *name = NULL;
13622 char *addr = NULL;
13623
13624 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13625 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13626 (void) sprintf(name, "%016"PRIx64, sasaddr);
13627 (void) sprintf(addr, "w%s,%x", name, lun);
13628 child = mptsas_find_child(pdip, addr);
13629 kmem_free(name, SCSI_MAXNAMELEN);
13630 kmem_free(addr, SCSI_MAXNAMELEN);
13631 return (child);
13632 }
13633
13634 static dev_info_t *
13635 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
13636 {
13637 dev_info_t *child;
13638 char *addr;
13639
13640 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13641 (void) sprintf(addr, "p%x,0", phy);
13642 child = mptsas_find_child(pdip, addr);
13643 kmem_free(addr, SCSI_MAXNAMELEN);
13644 return (child);
13645 }
13646
13647 static mdi_pathinfo_t *
13648 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
13649 {
13650 mdi_pathinfo_t *path;
13651 char *addr = NULL;
13652
13653 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13654 (void) sprintf(addr, "p%x,0", phy);
13655 path = mdi_pi_find(pdip, NULL, addr);
13656 kmem_free(addr, SCSI_MAXNAMELEN);
13657 return (path);
13658 }
13659
13660 static mdi_pathinfo_t *
13661 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
13662 {
13663 mdi_pathinfo_t *path;
13664 char *name = NULL;
13665 char *addr = NULL;
13666
13667 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13668 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13669 (void) sprintf(name, "%016"PRIx64, sasaddr);
13670 (void) sprintf(addr, "w%s,%x", name, lun);
13671 path = mdi_pi_find(parent, NULL, addr);
13672 kmem_free(name, SCSI_MAXNAMELEN);
13673 kmem_free(addr, SCSI_MAXNAMELEN);
13674
13675 return (path);
13676 }
13677
/*
 * Create a node for one LUN of a target: obtain a device GUID from
 * inquiry VPD page 0x83 (with retries, since some devices return
 * corrupt page 83 data on the first query), then create either a
 * scsi_vhci path (MPxIO enabled and a well-formed GUID obtained) or a
 * physical child node.
 *
 * pdip:    iport parent
 * sd_inq:  standard inquiry data for the LUN (may be NULL)
 * lun_dip: out - the created/online dev_info_t
 * Returns DDI_SUCCESS when a node or path was created.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int			i = 0;
	uchar_t			*inq83 = NULL;
	int			inq83_len1 = 0xFF;
	int			inq83_len = 0;
	int			rval = DDI_FAILURE;
	ddi_devid_t		devid;
	char			*guid = NULL;
	int			target = ptgt->m_devhdl;
	mdi_pathinfo_t		*pip = NULL;
	mptsas_t		*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83	= kmem_zalloc(inq83_len1, KM_SLEEP);

	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable escape hatch: optionally fall through to
			 * a physical (non-vhci) node when page 83 fails.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			/* Back off one second between attempts. */
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	/* Loop exhausted without a well-formed devid: report timeout. */
	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	/*
	 * Runs on both success and failure paths; the final status is
	 * decided by the node-creation calls below.
	 */
	rval = DDI_FAILURE;

create_lun:
	/* Prefer a scsi_vhci path when MPxIO is on and we have a GUID. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	/* Fall back to (or start with) a physical child node. */
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
13801
13802 static int
13803 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
13804 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
13805 {
13806 int target;
13807 char *nodename = NULL;
13808 char **compatible = NULL;
13809 int ncompatible = 0;
13810 int mdi_rtn = MDI_FAILURE;
13811 int rval = DDI_FAILURE;
13812 char *old_guid = NULL;
13813 mptsas_t *mpt = DIP2MPT(pdip);
13814 char *lun_addr = NULL;
13815 char *wwn_str = NULL;
13816 char *attached_wwn_str = NULL;
13817 char *component = NULL;
13818 uint8_t phy = 0xFF;
13819 uint64_t sas_wwn;
13820 int64_t lun64 = 0;
13821 uint32_t devinfo;
13822 uint16_t dev_hdl;
13823 uint16_t pdev_hdl;
13824 uint64_t dev_sas_wwn;
13825 uint64_t pdev_sas_wwn;
13826 uint32_t pdev_info;
13827 uint8_t physport;
13828 uint8_t phy_id;
13829 uint32_t page_address;
13830 uint16_t bay_num, enclosure;
13831 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
13832 uint32_t dev_info;
13833
13834 mutex_enter(&mpt->m_mutex);
13835 target = ptgt->m_devhdl;
13836 sas_wwn = ptgt->m_sas_wwn;
13837 devinfo = ptgt->m_deviceinfo;
13838 phy = ptgt->m_phynum;
13839 mutex_exit(&mpt->m_mutex);
13840
13841 if (sas_wwn) {
13842 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
13843 } else {
13844 *pip = mptsas_find_path_phy(pdip, phy);
13845 }
13846
13847 if (*pip != NULL) {
13848 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
13849 ASSERT(*lun_dip != NULL);
13850 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
13851 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
13852 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
13853 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
13854 /*
13855 * Same path back online again.
13856 */
13857 (void) ddi_prop_free(old_guid);
13858 if ((!MDI_PI_IS_ONLINE(*pip)) &&
13859 (!MDI_PI_IS_STANDBY(*pip)) &&
13860 (ptgt->m_tgt_unconfigured == 0)) {
13861 rval = mdi_pi_online(*pip, 0);
13862 } else {
13863 rval = DDI_SUCCESS;
13864 }
13865 if (rval != DDI_SUCCESS) {
13866 mptsas_log(mpt, CE_WARN, "path:target: "
13867 "%x, lun:%x online failed!", target,
13868 lun);
13869 *pip = NULL;
13870 *lun_dip = NULL;
13871 }
13872 return (rval);
13873 } else {
13874 /*
13875 * The GUID of the LUN has changed which maybe
13876 * because customer mapped another volume to the
13877 * same LUN.
13878 */
13879 mptsas_log(mpt, CE_WARN, "The GUID of the "
13880 "target:%x, lun:%x was changed, maybe "
13881 "because someone mapped another volume "
13882 "to the same LUN", target, lun);
13883 (void) ddi_prop_free(old_guid);
13884 if (!MDI_PI_IS_OFFLINE(*pip)) {
13885 rval = mdi_pi_offline(*pip, 0);
13886 if (rval != MDI_SUCCESS) {
13887 mptsas_log(mpt, CE_WARN, "path:"
13888 "target:%x, lun:%x offline "
13889 "failed!", target, lun);
13890 *pip = NULL;
13891 *lun_dip = NULL;
13892 return (DDI_FAILURE);
13893 }
13894 }
13895 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
13896 mptsas_log(mpt, CE_WARN, "path:target:"
13897 "%x, lun:%x free failed!", target,
13898 lun);
13899 *pip = NULL;
13900 *lun_dip = NULL;
13901 return (DDI_FAILURE);
13902 }
13903 }
13904 } else {
13905 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
13906 "property for path:target:%x, lun:%x", target, lun);
13907 *pip = NULL;
13908 *lun_dip = NULL;
13909 return (DDI_FAILURE);
13910 }
13911 }
13912 scsi_hba_nodename_compatible_get(inq, NULL,
13913 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
13914
13915 /*
13916 * if nodename can't be determined then print a message and skip it
13917 */
13918 if (nodename == NULL) {
13919 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
13920 "driver for target%d lun %d dtype:0x%02x", target, lun,
13921 inq->inq_dtype);
13922 return (DDI_FAILURE);
13923 }
13924
13925 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
13926 /* The property is needed by MPAPI */
13927 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
13928
13929 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13930 if (guid) {
13931 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
13932 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
13933 } else {
13934 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
13935 (void) sprintf(wwn_str, "p%x", phy);
13936 }
13937
13938 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
13939 guid, lun_addr, compatible, ncompatible,
13940 0, pip);
13941 if (mdi_rtn == MDI_SUCCESS) {
13942
13943 if (mdi_prop_update_string(*pip, MDI_GUID,
13944 guid) != DDI_SUCCESS) {
13945 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13946 "create prop for target %d lun %d (MDI_GUID)",
13947 target, lun);
13948 mdi_rtn = MDI_FAILURE;
13949 goto virt_create_done;
13950 }
13951
13952 if (mdi_prop_update_int(*pip, LUN_PROP,
13953 lun) != DDI_SUCCESS) {
13954 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13955 "create prop for target %d lun %d (LUN_PROP)",
13956 target, lun);
13957 mdi_rtn = MDI_FAILURE;
13958 goto virt_create_done;
13959 }
13960 lun64 = (int64_t)lun;
13961 if (mdi_prop_update_int64(*pip, LUN64_PROP,
13962 lun64) != DDI_SUCCESS) {
13963 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13964 "create prop for target %d (LUN64_PROP)",
13965 target);
13966 mdi_rtn = MDI_FAILURE;
13967 goto virt_create_done;
13968 }
13969 if (mdi_prop_update_string_array(*pip, "compatible",
13970 compatible, ncompatible) !=
13971 DDI_PROP_SUCCESS) {
13972 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13973 "create prop for target %d lun %d (COMPATIBLE)",
13974 target, lun);
13975 mdi_rtn = MDI_FAILURE;
13976 goto virt_create_done;
13977 }
13978 if (sas_wwn && (mdi_prop_update_string(*pip,
13979 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
13980 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13981 "create prop for target %d lun %d "
13982 "(target-port)", target, lun);
13983 mdi_rtn = MDI_FAILURE;
13984 goto virt_create_done;
13985 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
13986 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
13987 /*
13988 * Direct attached SATA device without DeviceName
13989 */
13990 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
13991 "create prop for SAS target %d lun %d "
13992 "(sata-phy)", target, lun);
13993 mdi_rtn = MDI_FAILURE;
13994 goto virt_create_done;
13995 }
13996 mutex_enter(&mpt->m_mutex);
13997
13998 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
13999 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14000 (uint32_t)ptgt->m_devhdl;
14001 rval = mptsas_get_sas_device_page0(mpt, page_address,
14002 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
14003 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14004 if (rval != DDI_SUCCESS) {
14005 mutex_exit(&mpt->m_mutex);
14006 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14007 "parent device for handle %d", page_address);
14008 mdi_rtn = MDI_FAILURE;
14009 goto virt_create_done;
14010 }
14011
14012 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14013 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14014 rval = mptsas_get_sas_device_page0(mpt, page_address,
14015 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
14016 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14017 if (rval != DDI_SUCCESS) {
14018 mutex_exit(&mpt->m_mutex);
14019 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14020 "device info for handle %d", page_address);
14021 mdi_rtn = MDI_FAILURE;
14022 goto virt_create_done;
14023 }
14024
14025 mutex_exit(&mpt->m_mutex);
14026
14027 /*
14028 * If this device direct attached to the controller
14029 * set the attached-port to the base wwid
14030 */
14031 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14032 != DEVINFO_DIRECT_ATTACHED) {
14033 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14034 pdev_sas_wwn);
14035 } else {
14036 /*
14037 * Update the iport's attached-port to guid
14038 */
14039 if (sas_wwn == 0) {
14040 (void) sprintf(wwn_str, "p%x", phy);
14041 } else {
14042 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14043 }
14044 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14045 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14046 DDI_PROP_SUCCESS) {
14047 mptsas_log(mpt, CE_WARN,
14048 "mptsas unable to create "
14049 "property for iport target-port"
14050 " %s (sas_wwn)",
14051 wwn_str);
14052 mdi_rtn = MDI_FAILURE;
14053 goto virt_create_done;
14054 }
14055
14056 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14057 mpt->un.m_base_wwid);
14058 }
14059
14060 if (mdi_prop_update_string(*pip,
14061 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14062 DDI_PROP_SUCCESS) {
14063 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14064 "property for iport attached-port %s (sas_wwn)",
14065 attached_wwn_str);
14066 mdi_rtn = MDI_FAILURE;
14067 goto virt_create_done;
14068 }
14069
14070
14071 if (inq->inq_dtype == 0) {
14072 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14073 /*
14074 * set obp path for pathinfo
14075 */
14076 (void) snprintf(component, MAXPATHLEN,
14077 "disk@%s", lun_addr);
14078
14079 if (mdi_pi_pathname_obp_set(*pip, component) !=
14080 DDI_SUCCESS) {
14081 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14082 "unable to set obp-path for object %s",
14083 component);
14084 mdi_rtn = MDI_FAILURE;
14085 goto virt_create_done;
14086 }
14087 }
14088
14089 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14090 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14091 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14092 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
14093 "pm-capable", 1)) !=
14094 DDI_PROP_SUCCESS) {
14095 mptsas_log(mpt, CE_WARN, "mptsas driver"
14096 "failed to create pm-capable "
14097 "property, target %d", target);
14098 mdi_rtn = MDI_FAILURE;
14099 goto virt_create_done;
14100 }
14101 }
14102 /*
14103 * Create the phy-num property
14104 */
14105 if (mdi_prop_update_int(*pip, "phy-num",
14106 ptgt->m_phynum) != DDI_SUCCESS) {
14107 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14108 "create phy-num property for target %d lun %d",
14109 target, lun);
14110 mdi_rtn = MDI_FAILURE;
14111 goto virt_create_done;
14112 }
14113 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14114 mdi_rtn = mdi_pi_online(*pip, 0);
14115 if (mdi_rtn == MDI_NOT_SUPPORTED) {
14116 mdi_rtn = MDI_FAILURE;
14117 }
14118 virt_create_done:
14119 if (*pip && mdi_rtn != MDI_SUCCESS) {
14120 (void) mdi_pi_free(*pip, 0);
14121 *pip = NULL;
14122 *lun_dip = NULL;
14123 }
14124 }
14125
14126 scsi_hba_nodename_compatible_free(nodename, compatible);
14127 if (lun_addr != NULL) {
14128 kmem_free(lun_addr, SCSI_MAXNAMELEN);
14129 }
14130 if (wwn_str != NULL) {
14131 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14132 }
14133 if (component != NULL) {
14134 kmem_free(component, MAXPATHLEN);
14135 }
14136
14137 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14138 }
14139
14140 static int
14141 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
14142 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
14143 {
14144 int target;
14145 int rval;
14146 int ndi_rtn = NDI_FAILURE;
14147 uint64_t be_sas_wwn;
14148 char *nodename = NULL;
14149 char **compatible = NULL;
14150 int ncompatible = 0;
14151 int instance = 0;
14152 mptsas_t *mpt = DIP2MPT(pdip);
14153 char *wwn_str = NULL;
14154 char *component = NULL;
14155 char *attached_wwn_str = NULL;
14156 uint8_t phy = 0xFF;
14157 uint64_t sas_wwn;
14158 uint32_t devinfo;
14159 uint16_t dev_hdl;
14160 uint16_t pdev_hdl;
14161 uint64_t pdev_sas_wwn;
14162 uint64_t dev_sas_wwn;
14163 uint32_t pdev_info;
14164 uint8_t physport;
14165 uint8_t phy_id;
14166 uint32_t page_address;
14167 uint16_t bay_num, enclosure;
14168 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14169 uint32_t dev_info;
14170 int64_t lun64 = 0;
14171
14172 mutex_enter(&mpt->m_mutex);
14173 target = ptgt->m_devhdl;
14174 sas_wwn = ptgt->m_sas_wwn;
14175 devinfo = ptgt->m_deviceinfo;
14176 phy = ptgt->m_phynum;
14177 mutex_exit(&mpt->m_mutex);
14178
14179 /*
14180 * generate compatible property with binding-set "mpt"
14181 */
14182 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
14183 &nodename, &compatible, &ncompatible);
14184
14185 /*
14186 * if nodename can't be determined then print a message and skip it
14187 */
14188 if (nodename == NULL) {
14189 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
14190 "for target %d lun %d", target, lun);
14191 return (DDI_FAILURE);
14192 }
14193
14194 ndi_rtn = ndi_devi_alloc(pdip, nodename,
14195 DEVI_SID_NODEID, lun_dip);
14196
14197 /*
14198 * if lun alloc success, set props
14199 */
14200 if (ndi_rtn == NDI_SUCCESS) {
14201
14202 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14203 *lun_dip, LUN_PROP, lun) !=
14204 DDI_PROP_SUCCESS) {
14205 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14206 "property for target %d lun %d (LUN_PROP)",
14207 target, lun);
14208 ndi_rtn = NDI_FAILURE;
14209 goto phys_create_done;
14210 }
14211
14212 lun64 = (int64_t)lun;
14213 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
14214 *lun_dip, LUN64_PROP, lun64) !=
14215 DDI_PROP_SUCCESS) {
14216 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14217 "property for target %d lun64 %d (LUN64_PROP)",
14218 target, lun);
14219 ndi_rtn = NDI_FAILURE;
14220 goto phys_create_done;
14221 }
14222 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
14223 *lun_dip, "compatible", compatible, ncompatible)
14224 != DDI_PROP_SUCCESS) {
14225 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14226 "property for target %d lun %d (COMPATIBLE)",
14227 target, lun);
14228 ndi_rtn = NDI_FAILURE;
14229 goto phys_create_done;
14230 }
14231
14232 /*
14233 * We need the SAS WWN for non-multipath devices, so
14234 * we'll use the same property as that multipathing
14235 * devices need to present for MPAPI. If we don't have
14236 * a WWN (e.g. parallel SCSI), don't create the prop.
14237 */
14238 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14239 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14240 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
14241 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
14242 != DDI_PROP_SUCCESS) {
14243 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14244 "create property for SAS target %d lun %d "
14245 "(target-port)", target, lun);
14246 ndi_rtn = NDI_FAILURE;
14247 goto phys_create_done;
14248 }
14249
14250 be_sas_wwn = BE_64(sas_wwn);
14251 if (sas_wwn && ndi_prop_update_byte_array(
14252 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
14253 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
14254 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14255 "create property for SAS target %d lun %d "
14256 "(port-wwn)", target, lun);
14257 ndi_rtn = NDI_FAILURE;
14258 goto phys_create_done;
14259 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
14260 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
14261 DDI_PROP_SUCCESS)) {
14262 /*
14263 * Direct attached SATA device without DeviceName
14264 */
14265 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14266 "create property for SAS target %d lun %d "
14267 "(sata-phy)", target, lun);
14268 ndi_rtn = NDI_FAILURE;
14269 goto phys_create_done;
14270 }
14271
14272 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14273 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
14274 mptsas_log(mpt, CE_WARN, "mptsas unable to"
14275 "create property for SAS target %d lun %d"
14276 " (SAS_PROP)", target, lun);
14277 ndi_rtn = NDI_FAILURE;
14278 goto phys_create_done;
14279 }
14280 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
14281 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
14282 mptsas_log(mpt, CE_WARN, "mptsas unable "
14283 "to create guid property for target %d "
14284 "lun %d", target, lun);
14285 ndi_rtn = NDI_FAILURE;
14286 goto phys_create_done;
14287 }
14288
14289 /*
14290 * The following code is to set properties for SM-HBA support,
14291 * it doesn't apply to RAID volumes
14292 */
14293 if (ptgt->m_phymask == 0)
14294 goto phys_raid_lun;
14295
14296 mutex_enter(&mpt->m_mutex);
14297
14298 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14299 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14300 (uint32_t)ptgt->m_devhdl;
14301 rval = mptsas_get_sas_device_page0(mpt, page_address,
14302 &dev_hdl, &dev_sas_wwn, &dev_info,
14303 &physport, &phy_id, &pdev_hdl,
14304 &bay_num, &enclosure);
14305 if (rval != DDI_SUCCESS) {
14306 mutex_exit(&mpt->m_mutex);
14307 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14308 "parent device for handle %d.", page_address);
14309 ndi_rtn = NDI_FAILURE;
14310 goto phys_create_done;
14311 }
14312
14313 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14314 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14315 rval = mptsas_get_sas_device_page0(mpt, page_address,
14316 &dev_hdl, &pdev_sas_wwn, &pdev_info,
14317 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
14318 if (rval != DDI_SUCCESS) {
14319 mutex_exit(&mpt->m_mutex);
14320 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14321 "device for handle %d.", page_address);
14322 ndi_rtn = NDI_FAILURE;
14323 goto phys_create_done;
14324 }
14325
14326 mutex_exit(&mpt->m_mutex);
14327
14328 /*
14329 * If this device direct attached to the controller
14330 * set the attached-port to the base wwid
14331 */
14332 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14333 != DEVINFO_DIRECT_ATTACHED) {
14334 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14335 pdev_sas_wwn);
14336 } else {
14337 /*
14338 * Update the iport's attached-port to guid
14339 */
14340 if (sas_wwn == 0) {
14341 (void) sprintf(wwn_str, "p%x", phy);
14342 } else {
14343 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14344 }
14345 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14346 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14347 DDI_PROP_SUCCESS) {
14348 mptsas_log(mpt, CE_WARN,
14349 "mptsas unable to create "
14350 "property for iport target-port"
14351 " %s (sas_wwn)",
14352 wwn_str);
14353 ndi_rtn = NDI_FAILURE;
14354 goto phys_create_done;
14355 }
14356
14357 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14358 mpt->un.m_base_wwid);
14359 }
14360
14361 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14362 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14363 DDI_PROP_SUCCESS) {
14364 mptsas_log(mpt, CE_WARN,
14365 "mptsas unable to create "
14366 "property for iport attached-port %s (sas_wwn)",
14367 attached_wwn_str);
14368 ndi_rtn = NDI_FAILURE;
14369 goto phys_create_done;
14370 }
14371
14372 if (IS_SATA_DEVICE(dev_info)) {
14373 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14374 *lun_dip, MPTSAS_VARIANT, "sata") !=
14375 DDI_PROP_SUCCESS) {
14376 mptsas_log(mpt, CE_WARN,
14377 "mptsas unable to create "
14378 "property for device variant ");
14379 ndi_rtn = NDI_FAILURE;
14380 goto phys_create_done;
14381 }
14382 }
14383
14384 if (IS_ATAPI_DEVICE(dev_info)) {
14385 if (ndi_prop_update_string(DDI_DEV_T_NONE,
14386 *lun_dip, MPTSAS_VARIANT, "atapi") !=
14387 DDI_PROP_SUCCESS) {
14388 mptsas_log(mpt, CE_WARN,
14389 "mptsas unable to create "
14390 "property for device variant ");
14391 ndi_rtn = NDI_FAILURE;
14392 goto phys_create_done;
14393 }
14394 }
14395
14396 phys_raid_lun:
14397 /*
14398 * if this is a SAS controller, and the target is a SATA
14399 * drive, set the 'pm-capable' property for sd and if on
14400 * an OPL platform, also check if this is an ATAPI
14401 * device.
14402 */
14403 instance = ddi_get_instance(mpt->m_dip);
14404 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14405 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14406 NDBG2(("mptsas%d: creating pm-capable property, "
14407 "target %d", instance, target));
14408
14409 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
14410 *lun_dip, "pm-capable", 1)) !=
14411 DDI_PROP_SUCCESS) {
14412 mptsas_log(mpt, CE_WARN, "mptsas "
14413 "failed to create pm-capable "
14414 "property, target %d", target);
14415 ndi_rtn = NDI_FAILURE;
14416 goto phys_create_done;
14417 }
14418
14419 }
14420
14421 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
14422 /*
14423 * add 'obp-path' properties for devinfo
14424 */
14425 bzero(wwn_str, sizeof (wwn_str));
14426 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14427 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14428 if (guid) {
14429 (void) snprintf(component, MAXPATHLEN,
14430 "disk@w%s,%x", wwn_str, lun);
14431 } else {
14432 (void) snprintf(component, MAXPATHLEN,
14433 "disk@p%x,%x", phy, lun);
14434 }
14435 if (ddi_pathname_obp_set(*lun_dip, component)
14436 != DDI_SUCCESS) {
14437 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14438 "unable to set obp-path for SAS "
14439 "object %s", component);
14440 ndi_rtn = NDI_FAILURE;
14441 goto phys_create_done;
14442 }
14443 }
14444 /*
14445 * Create the phy-num property for non-raid disk
14446 */
14447 if (ptgt->m_phymask != 0) {
14448 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14449 *lun_dip, "phy-num", ptgt->m_phynum) !=
14450 DDI_PROP_SUCCESS) {
14451 mptsas_log(mpt, CE_WARN, "mptsas driver "
14452 "failed to create phy-num property for "
14453 "target %d", target);
14454 ndi_rtn = NDI_FAILURE;
14455 goto phys_create_done;
14456 }
14457 }
14458 phys_create_done:
14459 /*
14460 * If props were setup ok, online the lun
14461 */
14462 if (ndi_rtn == NDI_SUCCESS) {
14463 /*
14464 * Try to online the new node
14465 */
14466 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
14467 }
14468
14469 /*
14470 * If success set rtn flag, else unwire alloc'd lun
14471 */
14472 if (ndi_rtn != NDI_SUCCESS) {
14473 NDBG12(("mptsas driver unable to online "
14474 "target %d lun %d", target, lun));
14475 ndi_prop_remove_all(*lun_dip);
14476 (void) ndi_devi_free(*lun_dip);
14477 *lun_dip = NULL;
14478 }
14479 }
14480
14481 scsi_hba_nodename_compatible_free(nodename, compatible);
14482
14483 if (wwn_str != NULL) {
14484 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14485 }
14486 if (component != NULL) {
14487 kmem_free(component, MAXPATHLEN);
14488 }
14489
14490
14491 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14492 }
14493
14494 static int
14495 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
14496 {
14497 mptsas_t *mpt = DIP2MPT(pdip);
14498 struct smp_device smp_sd;
14499
14500 /* XXX An HBA driver should not be allocating an smp_device. */
14501 bzero(&smp_sd, sizeof (struct smp_device));
14502 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
14503 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
14504
14505 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
14506 return (NDI_FAILURE);
14507 return (NDI_SUCCESS);
14508 }
14509
14510 static int
14511 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
14512 {
14513 mptsas_t *mpt = DIP2MPT(pdip);
14514 mptsas_smp_t *psmp = NULL;
14515 int rval;
14516 int phymask;
14517
14518 /*
14519 * Get the physical port associated to the iport
14520 * PHYMASK TODO
14521 */
14522 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14523 "phymask", 0);
14524 /*
14525 * Find the smp node in hash table with specified sas address and
14526 * physical port
14527 */
14528 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
14529 if (psmp == NULL) {
14530 return (DDI_FAILURE);
14531 }
14532
14533 rval = mptsas_online_smp(pdip, psmp, smp_dip);
14534
14535 return (rval);
14536 }
14537
/*
 * Create and online the "smp" child devinfo node for the expander
 * described by 'smp_node' under the iport 'pdip'.  On success the node
 * (pre-existing or newly created) is returned through 'smp_dip'.
 *
 * Returns DDI_SUCCESS when the node exists and is online, otherwise
 * DDI_FAILURE.
 */
static int
mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
    dev_info_t **smp_dip)
{
	char wwn_str[MPTSAS_WWN_STRLEN];
	char attached_wwn_str[MPTSAS_WWN_STRLEN];
	int ndi_rtn = NDI_FAILURE;
	int rval = 0;
	mptsas_smp_t dev_info;
	uint32_t page_address;
	mptsas_t *mpt = DIP2MPT(pdip);
	uint16_t dev_hdl;
	uint64_t sas_wwn;
	uint64_t smp_sas_wwn;
	uint8_t physport;
	uint8_t phy_id;
	uint16_t pdev_hdl;
	uint8_t numphys = 0;
	uint16_t i = 0;
	char phymask[MPTSAS_MAX_PHYS];
	char *iport = NULL;
	mptsas_phymask_t phy_mask = 0;
	uint16_t attached_devhdl;
	uint16_t bay_num, enclosure;

	/* Unit-address form of the expander's SAS address (no "w" prefix). */
	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);

	/*
	 * Probe smp device, prevent the node of removed device from being
	 * configured successfully
	 */
	if (mptsas_probe_smp(pdip, smp_node->m_sasaddr) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Already configured?  Then nothing more to do. */
	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
		return (DDI_SUCCESS);
	}

	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {
		/*
		 * Set the flavor of the child to be SMP flavored
		 */
		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SMP_WWN, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp device %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		/* Rewrite wwn_str in "w<addr>" form for target-port. */
		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_sasaddr);
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for iport target-port %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);

		/* Expander page0: learn this expander's parent handle. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &dev_info);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to get expander "
			    "parent device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Parent's device page0: its SAS WWN and device info. */
		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_pdevhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* The expander's own device page0: its device-info flags. */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		/*
		 * If this smp direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    sas_wwn);
		} else {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SMP %s (SMP_PROP) ",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/*
		 * check the smp to see whether it direct
		 * attached to the controller
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			goto smp_create_done;
		}
		/* If the iport already has phy props, keep them as-is. */
		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
		if (numphys > 0) {
			goto smp_create_done;
		}
		/*
		 * this iport is an old iport, we need to
		 * reconfig the props for it.
		 */
		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_VIRTUAL_PORT, 0) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);
		numphys = 0;
		/* Match the iport unit address against each phy_mask. */
		iport = ddi_get_name_addr(pdip);
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			bzero(phymask, sizeof (phymask));
			(void) sprintf(phymask,
			    "%x", mpt->m_phy_info[i].phy_mask);
			if (strcmp(phymask, iport) == 0) {
				phy_mask = mpt->m_phy_info[i].phy_mask;
				break;
			}
		}

		/* Count the PHYs belonging to this iport. */
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			if ((phy_mask >> i) & 0x01) {
				numphys++;
			}
		}
		/*
		 * Update PHY info for smhba
		 */
		if (mptsas_smhba_phy_init(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas phy update "
			    "failed");
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		mptsas_smhba_set_phy_props(mpt, iport, pdip,
		    numphys, &attached_devhdl);

		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_NUM_PHYS, numphys) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_NUM_PHYS);
			mptsas_log(mpt, CE_WARN, "mptsas update "
			    "num phys props failed");
			goto smp_create_done;
		}
		/*
		 * Add parent's props for SMHBA support
		 */
		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    SCSI_ADDR_PROP_ATTACHED_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas update iport"
			    "attached-port failed");
			goto smp_create_done;
		}

smp_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			/*
			 * NOTE(review): unlike the LUN creation paths,
			 * *smp_dip is freed here but not reset to NULL —
			 * confirm callers do not use it after failure.
			 */
			NDBG12(("mptsas unable to online "
			    "SMP target %s", wwn_str));
			ndi_prop_remove_all(*smp_dip);
			(void) ndi_devi_free(*smp_dip);
		}
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
14790
14791 /* smp transport routine */
14792 static int mptsas_smp_start(struct smp_pkt *smp_pkt)
14793 {
14794 uint64_t wwn;
14795 Mpi2SmpPassthroughRequest_t req;
14796 Mpi2SmpPassthroughReply_t rep;
14797 uint32_t direction = 0;
14798 mptsas_t *mpt;
14799 int ret;
14800 uint64_t tmp64;
14801
14802 mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
14803 smp_a_hba_tran->smp_tran_hba_private;
14804
14805 bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
14806 /*
14807 * Need to compose a SMP request message
14808 * and call mptsas_do_passthru() function
14809 */
14810 bzero(&req, sizeof (req));
14811 bzero(&rep, sizeof (rep));
14812 req.PassthroughFlags = 0;
14813 req.PhysicalPort = 0xff;
14814 req.ChainOffset = 0;
14815 req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
14816
14817 if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
14818 smp_pkt->smp_pkt_reason = ERANGE;
14819 return (DDI_FAILURE);
14820 }
14821 req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));
14822
14823 req.MsgFlags = 0;
14824 tmp64 = LE_64(wwn);
14825 bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
14826 if (smp_pkt->smp_pkt_rspsize > 0) {
14827 direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
14828 }
14829 if (smp_pkt->smp_pkt_reqsize > 0) {
14830 direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
14831 }
14832
14833 mutex_enter(&mpt->m_mutex);
14834 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
14835 (uint8_t *)smp_pkt->smp_pkt_rsp,
14836 offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
14837 smp_pkt->smp_pkt_rspsize - 4, direction,
14838 (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
14839 smp_pkt->smp_pkt_timeout, FKIOCTL);
14840 mutex_exit(&mpt->m_mutex);
14841 if (ret != 0) {
14842 cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
14843 smp_pkt->smp_pkt_reason = (uchar_t)(ret);
14844 return (DDI_FAILURE);
14845 }
14846 /* do passthrough success, check the smp status */
14847 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
14848 switch (LE_16(rep.IOCStatus)) {
14849 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
14850 smp_pkt->smp_pkt_reason = ENODEV;
14851 break;
14852 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
14853 smp_pkt->smp_pkt_reason = EOVERFLOW;
14854 break;
14855 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
14856 smp_pkt->smp_pkt_reason = EIO;
14857 break;
14858 default:
14859 mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
14860 "status:%x", LE_16(rep.IOCStatus));
14861 smp_pkt->smp_pkt_reason = EIO;
14862 break;
14863 }
14864 return (DDI_FAILURE);
14865 }
14866 if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
14867 mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
14868 rep.SASStatus);
14869 smp_pkt->smp_pkt_reason = EIO;
14870 return (DDI_FAILURE);
14871 }
14872
14873 return (DDI_SUCCESS);
14874 }
14875
/*
 * Look up the target attached to the given PHY.  If the target hash
 * table doesn't already contain a match, fetch SAS device page0 for
 * each remaining device until a match is found.  Returns NULL on
 * failure.
 */
static mptsas_target_t *
mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, uint8_t phy)
{
	int i, j = 0;
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *ptgt = NULL;

	/*
	 * PHY named device must be direct attached and attaches to
	 * narrow port, if the iport is not parent of the device which
	 * we are looking for.
	 */
	/* Count how many PHYs make up this iport's phymask. */
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		if ((1 << i) & phymask)
			j++;
	}

	/* More than one PHY is a wide port; PHY addressing can't apply. */
	if (j > 1)
		return (NULL);

	/*
	 * Must be a narrow port and single device attached to the narrow port
	 * So the physical port num of device which is equal to the iport's
	 * port num is the device what we are looking for.
	 */

	if (mpt->m_phy_info[phy].phy_mask != phymask)
		return (NULL);

	mutex_enter(&mpt->m_mutex);

	/* First scan the targets already known in the hash table. */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/* A PHY-addressed target has no WWN and a matching PHY num. */
		if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
			mutex_exit(&mpt->m_mutex);
			return (ptgt);
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* Device-page traversal already completed: nothing new to find. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	/* Resume the firmware device-page traversal at the cached handle. */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		ptgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			/* Traversal exhausted or allocation failed. */
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			/* Not an end device we track; keep scanning. */
			continue;
		}
		mpt->m_dev_handle = cur_handle;

		if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (ptgt);
}
14956
/*
 * The ptgt->m_sas_wwn field contains the wwid for each disk.
 * For RAID volumes we need to check m_raidvol[x].m_raidwwid instead.
 * If the target hash table doesn't already contain a match, fetch SAS
 * device page0 for each remaining device until a match is found.
 * Returns NULL on failure.
 */
static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *tmp_tgt = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Fast path: target already present in the hash table. */
	tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
	    &mpt->m_active->m_tgttbl, wwid, phymask);
	if (tmp_tgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	if (phymask == 0) {
		/*
		 * It's IR volume
		 */
		/* Refresh RAID info, then retry the hash lookup once. */
		rval = mptsas_get_raid_info(mpt);
		if (rval) {
			tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
			    &mpt->m_active->m_tgttbl, wwid, phymask);
		}
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	/* Device-page traversal already completed: no such target. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	/* Resume the firmware device-page traversal at the cached handle. */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		tmp_tgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &tmp_tgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			/* Traversal exhausted or allocation failed. */
			tmp_tgt = NULL;
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			/* Not an end device we track; keep scanning. */
			continue;
		}
		mpt->m_dev_handle = cur_handle;
		if ((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) &&
		    (tmp_tgt->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (tmp_tgt);
}
15025
/*
 * Map a (wwid, phymask) pair to its cached mptsas_smp_t (SMP expander).
 * Mirrors mptsas_wwid_to_ptgt(): try the hash table first, then walk the
 * remaining expander pages from firmware, caching each one discovered.
 * Returns NULL if the expander cannot be found.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_smp_t smp_node, *psmp = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Fast path: already cached in the SMP hash table. */
	psmp = (struct mptsas_smp *)mptsas_hash_search(&mpt->m_active->m_smptbl,
	    wwid, phymask);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* Full SMP traversal already done and no hash hit: not present. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/*
	 * No match yet: read expander page 0 for each remaining handle,
	 * resuming from the last handle we stopped at.  smp_node is a
	 * stack-local scratch buffer filled in by each page read.
	 */
	cur_handle = mpt->m_smp_devhdl;
	for (; ; ) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			/* No more expanders (or page read failed): give up. */
			break;
		}
		/* Record progress, then cache this expander in the table. */
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) &&
		    (psmp->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
15070
15071 /* helper functions using hash */
15072
15073 /*
15074 * Can't have duplicate entries for same devhdl,
15075 * if there are invalid entries, the devhdl should be set to 0xffff
15076 */
15077 static void *
15078 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
15079 {
15080 mptsas_hash_data_t *data;
15081
15082 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
15083 while (data != NULL) {
15084 if (data->devhdl == devhdl) {
15085 break;
15086 }
15087 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
15088 }
15089 return (data);
15090 }
15091
15092 mptsas_target_t *
15093 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
15094 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
15095 {
15096 mptsas_target_t *tmp_tgt = NULL;
15097
15098 tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
15099 if (tmp_tgt != NULL) {
15100 NDBG20(("Hash item already exist"));
15101 tmp_tgt->m_deviceinfo = devinfo;
15102 tmp_tgt->m_devhdl = devhdl;
15103 return (tmp_tgt);
15104 }
15105 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
15106 if (tmp_tgt == NULL) {
15107 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
15108 return (NULL);
15109 }
15110 tmp_tgt->m_devhdl = devhdl;
15111 tmp_tgt->m_sas_wwn = wwid;
15112 tmp_tgt->m_deviceinfo = devinfo;
15113 tmp_tgt->m_phymask = phymask;
15114 tmp_tgt->m_phynum = phynum;
15115 /* Initialized the tgt structure */
15116 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15117 tmp_tgt->m_qfull_retry_interval =
15118 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15119 tmp_tgt->m_t_throttle = MAX_THROTTLE;
15120
15121 mptsas_hash_add(hashtab, tmp_tgt);
15122
15123 return (tmp_tgt);
15124 }
15125
15126 static void
15127 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15128 mptsas_phymask_t phymask)
15129 {
15130 mptsas_target_t *tmp_tgt;
15131 tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
15132 if (tmp_tgt == NULL) {
15133 cmn_err(CE_WARN, "Tgt not found, nothing to free");
15134 } else {
15135 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
15136 }
15137 }
15138
15139 /*
15140 * Return the entry in the hash table
15141 */
15142 static mptsas_smp_t *
15143 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
15144 {
15145 uint64_t key1 = data->m_sasaddr;
15146 mptsas_phymask_t key2 = data->m_phymask;
15147 mptsas_smp_t *ret_data;
15148
15149 ret_data = mptsas_hash_search(hashtab, key1, key2);
15150 if (ret_data != NULL) {
15151 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15152 return (ret_data);
15153 }
15154
15155 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
15156 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15157 mptsas_hash_add(hashtab, ret_data);
15158 return (ret_data);
15159 }
15160
15161 static void
15162 mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15163 mptsas_phymask_t phymask)
15164 {
15165 mptsas_smp_t *tmp_smp;
15166 tmp_smp = mptsas_hash_rem(hashtab, wwid, phymask);
15167 if (tmp_smp == NULL) {
15168 cmn_err(CE_WARN, "Smp element not found, nothing to free");
15169 } else {
15170 kmem_free(tmp_smp, sizeof (struct mptsas_smp));
15171 }
15172 }
15173
15174 /*
15175 * Hash operation functions
15176 * key1 is the sas_wwn, key2 is the phymask
15177 */
/*
 * Reset a hash table to the empty state: clear all bucket heads and the
 * traversal cursor.  A NULL hashtab is tolerated as a no-op.
 */
static void
mptsas_hash_init(mptsas_hash_table_t *hashtab)
{
	if (hashtab == NULL) {
		return;
	}
	/*
	 * NOTE(review): the byte count here is MPTSAS_HASH_ARRAY_SIZE *
	 * sizeof (mptsas_hash_node_t), while head[] is indexed elsewhere as
	 * an array of bucket pointers.  Verify against the declaration of
	 * mptsas_hash_table_t that this bzero does not write past head[].
	 */
	bzero(hashtab->head, sizeof (mptsas_hash_node_t) *
	    MPTSAS_HASH_ARRAY_SIZE);
	hashtab->cur = NULL;
	hashtab->line = 0;
}
15189
15190 static void
15191 mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen)
15192 {
15193 uint16_t line = 0;
15194 mptsas_hash_node_t *cur = NULL, *last = NULL;
15195
15196 if (hashtab == NULL) {
15197 return;
15198 }
15199 for (line = 0; line < MPTSAS_HASH_ARRAY_SIZE; line++) {
15200 cur = hashtab->head[line];
15201 while (cur != NULL) {
15202 last = cur;
15203 cur = cur->next;
15204 kmem_free(last->data, datalen);
15205 kmem_free(last, sizeof (mptsas_hash_node_t));
15206 }
15207 }
15208 }
15209
15210 /*
15211 * You must guarantee the element doesn't exist in the hash table
15212 * before you call mptsas_hash_add()
15213 */
15214 static void
15215 mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data)
15216 {
15217 uint64_t key1 = ((mptsas_hash_data_t *)data)->key1;
15218 mptsas_phymask_t key2 = ((mptsas_hash_data_t *)data)->key2;
15219 mptsas_hash_node_t **head = NULL;
15220 mptsas_hash_node_t *node = NULL;
15221
15222 if (hashtab == NULL) {
15223 return;
15224 }
15225 ASSERT(mptsas_hash_search(hashtab, key1, key2) == NULL);
15226 node = kmem_zalloc(sizeof (mptsas_hash_node_t), KM_NOSLEEP);
15227 node->data = data;
15228
15229 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15230 if (*head == NULL) {
15231 *head = node;
15232 } else {
15233 node->next = *head;
15234 *head = node;
15235 }
15236 }
15237
15238 static void *
15239 mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
15240 mptsas_phymask_t key2)
15241 {
15242 mptsas_hash_node_t **head = NULL;
15243 mptsas_hash_node_t *last = NULL, *cur = NULL;
15244 mptsas_hash_data_t *data;
15245 if (hashtab == NULL) {
15246 return (NULL);
15247 }
15248 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15249 cur = *head;
15250 while (cur != NULL) {
15251 data = cur->data;
15252 if ((data->key1 == key1) && (data->key2 == key2)) {
15253 if (last == NULL) {
15254 (*head) = cur->next;
15255 } else {
15256 last->next = cur->next;
15257 }
15258 kmem_free(cur, sizeof (mptsas_hash_node_t));
15259 return (data);
15260 } else {
15261 last = cur;
15262 cur = cur->next;
15263 }
15264 }
15265 return (NULL);
15266 }
15267
15268 static void *
15269 mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
15270 mptsas_phymask_t key2)
15271 {
15272 mptsas_hash_node_t *cur = NULL;
15273 mptsas_hash_data_t *data;
15274 if (hashtab == NULL) {
15275 return (NULL);
15276 }
15277 cur = hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE];
15278 while (cur != NULL) {
15279 data = cur->data;
15280 if ((data->key1 == key1) && (data->key2 == key2)) {
15281 return (data);
15282 } else {
15283 cur = cur->next;
15284 }
15285 }
15286 return (NULL);
15287 }
15288
/*
 * Stateful iterator over every element in the hash table.  Call with
 * pos == MPTSAS_HASH_FIRST to (re)start from bucket 0, then repeatedly
 * with MPTSAS_HASH_NEXT to fetch subsequent elements.  Returns each
 * element's data pointer, or NULL once the whole table has been visited.
 *
 * The cursor (hashtab->cur, hashtab->line) lives in the table itself, so
 * only one traversal can be in progress per table at a time.
 * NOTE(review): removing the element at hashtab->cur mid-traversal would
 * leave the cursor dangling; confirm callers never interleave
 * mptsas_hash_rem() with an active traversal.
 */
static void *
mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos)
{
	mptsas_hash_node_t *this = NULL;

	if (hashtab == NULL) {
		return (NULL);
	}

	if (pos == MPTSAS_HASH_FIRST) {
		/* Restart: reset cursor to the head of bucket 0. */
		hashtab->line = 0;
		hashtab->cur = NULL;
		this = hashtab->head[0];
	} else {
		if (hashtab->cur == NULL) {
			return (NULL);
		} else {
			/* Continue within the current bucket's chain. */
			this = hashtab->cur->next;
		}
	}

	/* Skip over empty buckets until an element or the end is found. */
	while (this == NULL) {
		hashtab->line++;
		if (hashtab->line >= MPTSAS_HASH_ARRAY_SIZE) {
			/* the traverse reaches the end */
			hashtab->cur = NULL;
			return (NULL);
		} else {
			this = hashtab->head[hashtab->line];
		}
	}
	hashtab->cur = this;
	return (this->data);
}
15323
/*
 * Functions for SGPIO LED support
 */
/*
 * Resolve a dev_t to its dev_info node and fetch the node's "phymask"
 * property into *phymask (0 if the property is absent).  Returns NULL if
 * the dev_t cannot be resolved.
 *
 * NOTE(review): the hold taken by e_ddi_hold_devi_by_dev() is released
 * before the dip is returned, so the returned pointer is not protected by
 * a hold here — confirm callers only use it while the device cannot
 * detach.
 */
static dev_info_t *
mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
{
	dev_info_t *dip;
	int prop;
	dip = e_ddi_hold_devi_by_dev(dev, 0);
	if (dip == NULL)
		return (dip);
	prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "phymask", 0);
	*phymask = (mptsas_phymask_t)prop;
	ddi_release_devi(dip);
	return (dip);
}
15341
15342 int
15343 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
15344 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
15345 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
15346 {
15347 ddi_dma_cookie_t new_cookie;
15348 size_t alloc_len;
15349 uint_t ncookie;
15350
15351 if (cookiep == NULL)
15352 cookiep = &new_cookie;
15353
15354 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
15355 NULL, dma_hdp) != DDI_SUCCESS) {
15356 dma_hdp = NULL;
15357 return (FALSE);
15358 }
15359
15360 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
15361 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
15362 acc_hdp) != DDI_SUCCESS) {
15363 ddi_dma_free_handle(dma_hdp);
15364 dma_hdp = NULL;
15365 return (FALSE);
15366 }
15367
15368 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
15369 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
15370 cookiep, &ncookie) != DDI_DMA_MAPPED) {
15371 (void) ddi_dma_mem_free(acc_hdp);
15372 ddi_dma_free_handle(dma_hdp);
15373 dma_hdp = NULL;
15374 return (FALSE);
15375 }
15376
15377 return (TRUE);
15378 }
15379
15380 void
15381 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
15382 {
15383 if (*dma_hdp == NULL)
15384 return;
15385
15386 (void) ddi_dma_unbind_handle(*dma_hdp);
15387 (void) ddi_dma_mem_free(acc_hdp);
15388 ddi_dma_free_handle(dma_hdp);
15389 dma_hdp = NULL;
15390 }