1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
28 */
29
30 /*
31 * Copyright (c) 2000 to 2010, LSI Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms of all code within
35 * this file that is exclusively owned by LSI, with or without
36 * modification, is permitted provided that, in addition to the CDDL 1.0
37 * License requirements, the following conditions are met:
38 *
39 * Neither the name of the author nor the names of its contributors may be
40 * used to endorse or promote products derived from this software without
41 * specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
46 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
47 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
49 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
50 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
51 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
52 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 */
56
57 /*
58 * mptsas - This is a driver based on LSI Logic's MPT2.0/2.5 interface.
59 *
60 */
61
62 #if defined(lint) || defined(DEBUG)
63 #define MPTSAS_DEBUG
64 #endif
65
66 /*
67 * standard header files.
68 */
69 #include <sys/note.h>
70 #include <sys/scsi/scsi.h>
71 #include <sys/pci.h>
72 #include <sys/file.h>
73 #include <sys/policy.h>
74 #include <sys/model.h>
75 #include <sys/sysevent.h>
76 #include <sys/sysevent/eventdefs.h>
77 #include <sys/sysevent/dr.h>
78 #include <sys/sata/sata_defs.h>
79 #include <sys/scsi/generic/sas.h>
80 #include <sys/scsi/impl/scsi_sas.h>
81
82 #pragma pack(1)
83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
90 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
91 #pragma pack()
92
93 /*
94 * private header files.
95 *
96 */
97 #include <sys/scsi/impl/scsi_reset_notify.h>
98 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
99 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
100 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
101 #include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
102 #include <sys/raidioctl.h>
103
104 #include <sys/fs/dv_node.h> /* devfs_clean */
105
106 /*
107 * FMA header files
108 */
109 #include <sys/ddifm.h>
110 #include <sys/fm/protocol.h>
111 #include <sys/fm/util.h>
112 #include <sys/fm/io/ddi.h>
113
114 /*
115 * autoconfiguration data and routines.
116 */
117 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
118 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
119 static int mptsas_power(dev_info_t *dip, int component, int level);
120
121 /*
122 * cb_ops function
123 */
124 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
125 cred_t *credp, int *rval);
126 #ifdef __sparc
127 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
128 #else /* __sparc */
129 static int mptsas_quiesce(dev_info_t *devi);
130 #endif /* __sparc */
131
132 /*
 * Resource initialization for hardware
134 */
135 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
136 static void mptsas_disable_bus_master(mptsas_t *mpt);
137 static void mptsas_hba_fini(mptsas_t *mpt);
138 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
139 static int mptsas_hba_setup(mptsas_t *mpt);
140 static void mptsas_hba_teardown(mptsas_t *mpt);
141 static int mptsas_config_space_init(mptsas_t *mpt);
142 static void mptsas_config_space_fini(mptsas_t *mpt);
143 static void mptsas_iport_register(mptsas_t *mpt);
144 static int mptsas_smp_setup(mptsas_t *mpt);
145 static void mptsas_smp_teardown(mptsas_t *mpt);
146 static int mptsas_cache_create(mptsas_t *mpt);
147 static void mptsas_cache_destroy(mptsas_t *mpt);
148 static int mptsas_alloc_request_frames(mptsas_t *mpt);
149 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
150 static int mptsas_alloc_free_queue(mptsas_t *mpt);
151 static int mptsas_alloc_post_queue(mptsas_t *mpt);
152 static void mptsas_alloc_reply_args(mptsas_t *mpt);
153 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
154 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
155 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
156
157 /*
158 * SCSA function prototypes
159 */
160 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
161 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
162 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
163 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
164 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
165 int tgtonly);
166 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
167 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
168 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
169 int tgtlen, int flags, int (*callback)(), caddr_t arg);
170 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
171 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
172 struct scsi_pkt *pkt);
173 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
174 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
175 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
176 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
177 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
178 void (*callback)(caddr_t), caddr_t arg);
179 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
180 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
181 static int mptsas_scsi_quiesce(dev_info_t *dip);
182 static int mptsas_scsi_unquiesce(dev_info_t *dip);
183 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
184 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
185
186 /*
187 * SMP functions
188 */
189 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
190
191 /*
192 * internal function prototypes.
193 */
194 static void mptsas_list_add(mptsas_t *mpt);
195 static void mptsas_list_del(mptsas_t *mpt);
196
197 static int mptsas_quiesce_bus(mptsas_t *mpt);
198 static int mptsas_unquiesce_bus(mptsas_t *mpt);
199
200 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
201 static void mptsas_free_handshake_msg(mptsas_t *mpt);
202
203 static void mptsas_ncmds_checkdrain(void *arg);
204
205 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
206 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
207 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
208 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
209
210 static int mptsas_do_detach(dev_info_t *dev);
211 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
212 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
213 struct scsi_pkt *pkt);
214 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
215
216 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
217 static void mptsas_handle_event(void *args);
218 static int mptsas_handle_event_sync(void *args);
219 static void mptsas_handle_dr(void *args);
220 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
221 dev_info_t *pdip);
222
223 static void mptsas_restart_cmd(void *);
224
225 static void mptsas_flush_hba(mptsas_t *mpt);
226 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
227 uint8_t tasktype);
228 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
229 uchar_t reason, uint_t stat);
230
231 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
232 static void mptsas_process_intr(mptsas_t *mpt,
233 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
234 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
235 pMpi2ReplyDescriptorsUnion_t reply_desc);
236 static void mptsas_handle_address_reply(mptsas_t *mpt,
237 pMpi2ReplyDescriptorsUnion_t reply_desc);
238 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
239 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
240 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
241
242 static void mptsas_watch(void *arg);
243 static void mptsas_watchsubr(mptsas_t *mpt);
244 static void mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt);
245
246 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
247 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
248 uint8_t *data, uint32_t request_size, uint32_t reply_size,
249 uint32_t data_size, uint32_t direction, uint8_t *dataout,
250 uint32_t dataout_size, short timeout, int mode);
251 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
252
253 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
254 uint32_t unique_id);
255 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
256 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
257 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
258 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
259 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
260 uint32_t diag_type);
261 static int mptsas_diag_register(mptsas_t *mpt,
262 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
263 static int mptsas_diag_unregister(mptsas_t *mpt,
264 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
265 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
266 uint32_t *return_code);
267 static int mptsas_diag_read_buffer(mptsas_t *mpt,
268 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
269 uint32_t *return_code, int ioctl_mode);
270 static int mptsas_diag_release(mptsas_t *mpt,
271 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
272 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
273 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
274 int ioctl_mode);
275 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
276 int mode);
277
278 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
279 int cmdlen, int tgtlen, int statuslen, int kf);
280 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
281
282 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
283 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
284
285 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
286 int kmflags);
287 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
288
289 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
290 mptsas_cmd_t *cmd);
291 static void mptsas_check_task_mgt(mptsas_t *mpt,
292 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
293 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
294 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
295 int *resid);
296
297 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
298 static void mptsas_free_active_slots(mptsas_t *mpt);
299 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
300
301 static void mptsas_restart_hba(mptsas_t *mpt);
302 static void mptsas_restart_waitq(mptsas_t *mpt);
303
304 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
305 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
306 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
307
308 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
309 static void mptsas_doneq_empty(mptsas_t *mpt);
310 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
311
312 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
313 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
314 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
315 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
316
317
318 static void mptsas_start_watch_reset_delay();
319 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
320 static void mptsas_watch_reset_delay(void *arg);
321 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
322
323 /*
324 * helper functions
325 */
326 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
327
328 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
329 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
330 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
331 int lun);
332 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
333 int lun);
334 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
335 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
336
337 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
338 int *lun);
339 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
340
341 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
342 mptsas_phymask_t phymask, uint8_t phy);
343 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
344 mptsas_phymask_t phymask, uint64_t wwid);
345 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
346 mptsas_phymask_t phymask, uint64_t wwid);
347
348 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
349 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
350
351 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
352 uint16_t *handle, mptsas_target_t **pptgt);
353 static void mptsas_update_phymask(mptsas_t *mpt);
354
355 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
356 uint32_t *status, uint8_t cmd);
357 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
358 mptsas_phymask_t *phymask);
359 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
360 mptsas_phymask_t phymask);
361 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt);
362
363
364 /*
365 * Enumeration / DR functions
366 */
367 static void mptsas_config_all(dev_info_t *pdip);
368 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
369 dev_info_t **lundip);
370 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
371 dev_info_t **lundip);
372
373 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
374 static int mptsas_offline_target(dev_info_t *pdip, char *name);
375
376 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
377 dev_info_t **dip);
378
379 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
380 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
381 dev_info_t **dip, mptsas_target_t *ptgt);
382
383 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
384 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
385
386 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
387 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
388 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
389 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
390 int lun);
391
392 static void mptsas_offline_missed_luns(dev_info_t *pdip,
393 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
394 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
395 mdi_pathinfo_t *rpip, uint_t flags);
396
397 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
398 dev_info_t **smp_dip);
399 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
400 uint_t flags);
401
402 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
403 int mode, int *rval);
404 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
405 int mode, int *rval);
406 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
407 int mode, int *rval);
408 static void mptsas_record_event(void *args);
409 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
410 int mode);
411
412 mptsas_target_t *mptsas_tgt_alloc(mptsas_t *, uint16_t, uint64_t,
413 uint32_t, mptsas_phymask_t, uint8_t);
414 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
415 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
416 dev_info_t **smp_dip);
417
418 /*
419 * Power management functions
420 */
421 static int mptsas_get_pci_cap(mptsas_t *mpt);
422 static int mptsas_init_pm(mptsas_t *mpt);
423
424 /*
425 * MPT MSI tunable:
426 *
427 * By default MSI is enabled on all supported platforms.
428 */
429 boolean_t mptsas_enable_msi = B_TRUE;
430 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
431
432 /*
433 * Global switch for use of MPI2.5 FAST PATH.
434 * We don't really know what FAST PATH actually does, so if it is suspected
435 * to cause problems it can be turned off by setting this variable to B_FALSE.
436 */
437 boolean_t mptsas_use_fastpath = B_TRUE;
438
439 static int mptsas_register_intrs(mptsas_t *);
440 static void mptsas_unregister_intrs(mptsas_t *);
441 static int mptsas_add_intrs(mptsas_t *, int);
442 static void mptsas_rem_intrs(mptsas_t *);
443
444 /*
445 * FMA Prototypes
446 */
447 static void mptsas_fm_init(mptsas_t *mpt);
448 static void mptsas_fm_fini(mptsas_t *mpt);
449 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
450
451 extern pri_t minclsyspri, maxclsyspri;
452
453 /*
454 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
455 * under this device that the paths to a physical device are created when
456 * MPxIO is used.
457 */
458 extern dev_info_t *scsi_vhci_dip;
459
460 /*
461 * Tunable timeout value for Inquiry VPD page 0x83
462 * By default the value is 30 seconds.
463 */
464 int mptsas_inq83_retry_timeout = 30;
465
466 /*
467 * This is used to allocate memory for message frame storage, not for
468 * data I/O DMA. All message frames must be stored in the first 4G of
469 * physical memory.
470 */
471 ddi_dma_attr_t mptsas_dma_attrs = {
472 DMA_ATTR_V0, /* attribute layout version */
473 0x0ull, /* address low - should be 0 (longlong) */
474 0xffffffffull, /* address high - 32-bit max range */
475 0x00ffffffull, /* count max - max DMA object size */
476 4, /* allocation alignment requirements */
477 0x78, /* burstsizes - binary encoded values */
478 1, /* minxfer - gran. of DMA engine */
479 0x00ffffffull, /* maxxfer - gran. of DMA engine */
480 0xffffffffull, /* max segment size (DMA boundary) */
481 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
482 512, /* granularity - device transfer size */
483 0 /* flags, set to 0 */
484 };
485
486 /*
487 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
488 * physical addresses are supported.)
489 */
490 ddi_dma_attr_t mptsas_dma_attrs64 = {
491 DMA_ATTR_V0, /* attribute layout version */
492 0x0ull, /* address low - should be 0 (longlong) */
493 0xffffffffffffffffull, /* address high - 64-bit max */
494 0x00ffffffull, /* count max - max DMA object size */
495 4, /* allocation alignment requirements */
496 0x78, /* burstsizes - binary encoded values */
497 1, /* minxfer - gran. of DMA engine */
498 0x00ffffffull, /* maxxfer - gran. of DMA engine */
499 0xffffffffull, /* max segment size (DMA boundary) */
500 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
501 512, /* granularity - device transfer size */
502 DDI_DMA_RELAXED_ORDERING /* flags, enable relaxed ordering */
503 };
504
/*
 * Register/device memory access attributes: the IOC's structures are
 * little-endian, accessed with strict ordering and default (non-FMA
 * protected) access semantics.
 */
ddi_device_acc_attr_t mptsas_dev_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
511
/*
 * Character device entry points.  open(9E)/close(9E) are delegated to
 * the SCSA framework; ioctl(9E) is the only entry point implemented by
 * this driver itself.
 */
static struct cb_ops mptsas_cb_ops = {
	scsi_hba_open,	/* open */
	scsi_hba_close,	/* close */
	nodev,	/* strategy */
	nodev,	/* print */
	nodev,	/* dump */
	nodev,	/* read */
	nodev,	/* write */
	mptsas_ioctl,	/* ioctl */
	nodev,	/* devmap */
	nodev,	/* mmap */
	nodev,	/* segmap */
	nochpoll,	/* chpoll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,	/* streamtab */
	D_MP,	/* cb_flag */
	CB_REV,	/* rev */
	nodev,	/* aread */
	nodev	/* awrite */
};
532
/*
 * Device operations vector.  On SPARC the legacy reset(9E) entry is
 * provided and quiesce(9E) is not needed; on other platforms
 * mptsas_quiesce() supports fast reboot instead.
 */
static struct dev_ops mptsas_ops = {
	DEVO_REV,	/* devo_rev, */
	0,	/* refcnt */
	ddi_no_info,	/* info */
	nulldev,	/* identify */
	nulldev,	/* probe */
	mptsas_attach,	/* attach */
	mptsas_detach,	/* detach */
#ifdef __sparc
	mptsas_reset,	/* reset */
#else
	nodev,	/* reset */
#endif	/* __sparc */
	&mptsas_cb_ops,	/* driver operations */
	NULL,	/* bus operations */
	mptsas_power,	/* power management */
#ifdef __sparc
	ddi_quiesce_not_needed
#else
	mptsas_quiesce	/* quiesce */
#endif	/* __sparc */
};
555
556
#define	MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"

/* Loadable module linkage: this module is a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. This one is a driver */
	MPTSAS_MOD_STRING,	/* Name of the module. */
	&mptsas_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
568 #define TARGET_PROP "target"
569 #define LUN_PROP "lun"
570 #define LUN64_PROP "lun64"
571 #define SAS_PROP "sas-mpt"
572 #define MDI_GUID "wwn"
573 #define NDI_GUID "guid"
574 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
575
576 /*
577 * Local static data
578 */
579 #if defined(MPTSAS_DEBUG)
580 /*
581 * Flags to indicate which debug messages are to be printed and which go to the
582 * debug log ring buffer. Default is to not print anything, and to log
583 * everything except the watchsubr() output which normally happens every second.
584 */
585 uint32_t mptsas_debugprt_flags = 0x0;
586 uint32_t mptsas_debuglog_flags = ~(1U << 30);
587 #endif /* defined(MPTSAS_DEBUG) */
588 uint32_t mptsas_debug_resets = 0;
589
590 static kmutex_t mptsas_global_mutex;
591 static void *mptsas_state; /* soft state ptr */
592 static krwlock_t mptsas_global_rwlock;
593
594 static kmutex_t mptsas_log_mutex;
595 static char mptsas_log_buf[256];
596 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
597
598 static mptsas_t *mptsas_head, *mptsas_tail;
599 static clock_t mptsas_scsi_watchdog_tick;
600 static clock_t mptsas_tick;
601 static timeout_id_t mptsas_reset_watch;
602 static timeout_id_t mptsas_timeout_id;
603 static int mptsas_timeouts_enabled = 0;
604 /*
605 * warlock directives
606 */
607 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
608 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
609 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
610 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
611 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
612 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
613
614 /*
615 * SM - HBA statics
616 */
617 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
618
619 #ifdef MPTSAS_DEBUG
620 void debug_enter(char *);
621 #endif
622
623 /*
624 * Notes:
625 * - scsi_hba_init(9F) initializes SCSI HBA modules
626 * - must call scsi_hba_fini(9F) if modload() fails
627 */
628 int
629 _init(void)
630 {
631 int status;
632 /* CONSTCOND */
633 ASSERT(NO_COMPETING_THREADS);
634
635 NDBG0(("_init"));
636
637 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
638 MPTSAS_INITIAL_SOFT_SPACE);
639 if (status != 0) {
640 return (status);
641 }
642
643 if ((status = scsi_hba_init(&modlinkage)) != 0) {
644 ddi_soft_state_fini(&mptsas_state);
645 return (status);
646 }
647
648 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
649 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
650 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
651
652 if ((status = mod_install(&modlinkage)) != 0) {
653 mutex_destroy(&mptsas_log_mutex);
654 rw_destroy(&mptsas_global_rwlock);
655 mutex_destroy(&mptsas_global_mutex);
656 ddi_soft_state_fini(&mptsas_state);
657 scsi_hba_fini(&modlinkage);
658 }
659
660 return (status);
661 }
662
663 /*
664 * Notes:
665 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
666 */
667 int
668 _fini(void)
669 {
670 int status;
671 /* CONSTCOND */
672 ASSERT(NO_COMPETING_THREADS);
673
674 NDBG0(("_fini"));
675
676 if ((status = mod_remove(&modlinkage)) == 0) {
677 ddi_soft_state_fini(&mptsas_state);
678 scsi_hba_fini(&modlinkage);
679 mutex_destroy(&mptsas_global_mutex);
680 rw_destroy(&mptsas_global_rwlock);
681 mutex_destroy(&mptsas_log_mutex);
682 }
683 return (status);
684 }
685
686 /*
687 * The loadable-module _info(9E) entry point
688 */
689 int
690 _info(struct modinfo *modinfop)
691 {
692 /* CONSTCOND */
693 ASSERT(NO_COMPETING_THREADS);
694 NDBG0(("mptsas _info"));
695
696 return (mod_info(&modlinkage, modinfop));
697 }
698
699 static int
700 mptsas_target_eval_devhdl(const void *op, void *arg)
701 {
702 uint16_t dh = *(uint16_t *)arg;
703 const mptsas_target_t *tp = op;
704
705 return ((int)tp->m_devhdl - (int)dh);
706 }
707
708 static int
709 mptsas_target_eval_slot(const void *op, void *arg)
710 {
711 mptsas_led_control_t *lcp = arg;
712 const mptsas_target_t *tp = op;
713
714 if (tp->m_enclosure != lcp->Enclosure)
715 return ((int)tp->m_enclosure - (int)lcp->Enclosure);
716
717 return ((int)tp->m_slot_num - (int)lcp->Slot);
718 }
719
720 static int
721 mptsas_target_eval_nowwn(const void *op, void *arg)
722 {
723 uint8_t phy = *(uint8_t *)arg;
724 const mptsas_target_t *tp = op;
725
726 if (tp->m_addr.mta_wwn != 0)
727 return (-1);
728
729 return ((int)tp->m_phynum - (int)phy);
730 }
731
732 static int
733 mptsas_smp_eval_devhdl(const void *op, void *arg)
734 {
735 uint16_t dh = *(uint16_t *)arg;
736 const mptsas_smp_t *sp = op;
737
738 return ((int)sp->m_devhdl - (int)dh);
739 }
740
741 static uint64_t
742 mptsas_target_addr_hash(const void *tp)
743 {
744 const mptsas_target_addr_t *tap = tp;
745
746 return ((tap->mta_wwn & 0xffffffffffffULL) |
747 ((uint64_t)tap->mta_phymask << 48));
748 }
749
750 static int
751 mptsas_target_addr_cmp(const void *a, const void *b)
752 {
753 const mptsas_target_addr_t *aap = a;
754 const mptsas_target_addr_t *bap = b;
755
756 if (aap->mta_wwn < bap->mta_wwn)
757 return (-1);
758 if (aap->mta_wwn > bap->mta_wwn)
759 return (1);
760 return ((int)bap->mta_phymask - (int)aap->mta_phymask);
761 }
762
/*
 * refhash destructor callback: release a target node's storage.
 */
static void
mptsas_target_free(void *op)
{
	kmem_free(op, sizeof (mptsas_target_t));
}
768
/*
 * refhash destructor callback: release an SMP target node's storage.
 */
static void
mptsas_smp_free(void *op)
{
	kmem_free(op, sizeof (mptsas_smp_t));
}
774
/*
 * Empty and destroy both the target and SMP-target refhash tables,
 * clearing the instance's table pointers so later use is caught as a
 * NULL dereference.
 *
 * NOTE(review): refhash_next() is called on a node that was just passed
 * to refhash_remove() — this relies on the refhash implementation
 * keeping removed nodes walkable until released (reference counting);
 * confirm against mptsas_hash.c before restructuring this loop.
 */
static void
mptsas_destroy_hashes(mptsas_t *mpt)
{
	mptsas_target_t *tp;
	mptsas_smp_t *sp;

	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		refhash_remove(mpt->m_targets, tp);
	}
	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
	    sp = refhash_next(mpt->m_smp_targets, sp)) {
		refhash_remove(mpt->m_smp_targets, sp);
	}
	refhash_destroy(mpt->m_targets);
	refhash_destroy(mpt->m_smp_targets);
	mpt->m_targets = NULL;
	mpt->m_smp_targets = NULL;
}
794
795 static int
796 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
797 {
798 dev_info_t *pdip;
799 mptsas_t *mpt;
800 scsi_hba_tran_t *hba_tran;
801 char *iport = NULL;
802 char phymask[MPTSAS_MAX_PHYS];
803 mptsas_phymask_t phy_mask = 0;
804 int dynamic_port = 0;
805 uint32_t page_address;
806 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
807 int rval = DDI_FAILURE;
808 int i = 0;
809 uint8_t numphys = 0;
810 uint8_t phy_id;
811 uint8_t phy_port = 0;
812 uint16_t attached_devhdl = 0;
813 uint32_t dev_info;
814 uint64_t attached_sas_wwn;
815 uint16_t dev_hdl;
816 uint16_t pdev_hdl;
817 uint16_t bay_num, enclosure, io_flags;
818 char attached_wwnstr[MPTSAS_WWN_STRLEN];
819
820 /* CONSTCOND */
821 ASSERT(NO_COMPETING_THREADS);
822
823 switch (cmd) {
824 case DDI_ATTACH:
825 break;
826
827 case DDI_RESUME:
828 /*
	 * If this is a scsi-iport node, there is nothing to do here.
830 */
831 return (DDI_SUCCESS);
832
833 default:
834 return (DDI_FAILURE);
835 }
836
837 pdip = ddi_get_parent(dip);
838
839 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
840 NULL) {
841 cmn_err(CE_WARN, "Failed attach iport because fail to "
842 "get tran vector for the HBA node");
843 return (DDI_FAILURE);
844 }
845
846 mpt = TRAN2MPT(hba_tran);
847 ASSERT(mpt != NULL);
848 if (mpt == NULL)
849 return (DDI_FAILURE);
850
851 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
852 NULL) {
853 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
854 "get tran vector for the iport node");
855 return (DDI_FAILURE);
856 }
857
858 /*
859 * Overwrite parent's tran_hba_private to iport's tran vector
860 */
861 hba_tran->tran_hba_private = mpt;
862
863 ddi_report_dev(dip);
864
865 /*
	 * Get SAS address for the initiator port according to its dev_handle
867 */
868 iport = ddi_get_name_addr(dip);
869 if (iport && strncmp(iport, "v0", 2) == 0) {
870 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
871 MPTSAS_VIRTUAL_PORT, 1) !=
872 DDI_PROP_SUCCESS) {
873 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
874 MPTSAS_VIRTUAL_PORT);
875 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
876 "prop update failed");
877 return (DDI_FAILURE);
878 }
879 return (DDI_SUCCESS);
880 }
881
882 mutex_enter(&mpt->m_mutex);
883 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
884 bzero(phymask, sizeof (phymask));
885 (void) sprintf(phymask,
886 "%x", mpt->m_phy_info[i].phy_mask);
887 if (strcmp(phymask, iport) == 0) {
888 break;
889 }
890 }
891
892 if (i == MPTSAS_MAX_PHYS) {
893 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
894 "seems not exist", iport);
895 mutex_exit(&mpt->m_mutex);
896 return (DDI_FAILURE);
897 }
898
899 phy_mask = mpt->m_phy_info[i].phy_mask;
900
901 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
902 dynamic_port = 1;
903 else
904 dynamic_port = 0;
905
906 /*
907 * Update PHY info for smhba
908 */
909 if (mptsas_smhba_phy_init(mpt)) {
910 mutex_exit(&mpt->m_mutex);
911 mptsas_log(mpt, CE_WARN, "mptsas phy update "
912 "failed");
913 return (DDI_FAILURE);
914 }
915
916 mutex_exit(&mpt->m_mutex);
917
918 numphys = 0;
919 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
920 if ((phy_mask >> i) & 0x01) {
921 numphys++;
922 }
923 }
924
925 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
926 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
927 mpt->un.m_base_wwid);
928
929 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
930 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
931 DDI_PROP_SUCCESS) {
932 (void) ddi_prop_remove(DDI_DEV_T_NONE,
933 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
934 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
935 "prop update failed");
936 return (DDI_FAILURE);
937 }
938 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
939 MPTSAS_NUM_PHYS, numphys) !=
940 DDI_PROP_SUCCESS) {
941 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
942 return (DDI_FAILURE);
943 }
944
945 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
946 "phymask", phy_mask) !=
947 DDI_PROP_SUCCESS) {
948 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
949 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
950 "prop update failed");
951 return (DDI_FAILURE);
952 }
953
954 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
955 "dynamic-port", dynamic_port) !=
956 DDI_PROP_SUCCESS) {
957 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
958 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
959 "prop update failed");
960 return (DDI_FAILURE);
961 }
962 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
963 MPTSAS_VIRTUAL_PORT, 0) !=
964 DDI_PROP_SUCCESS) {
965 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
966 MPTSAS_VIRTUAL_PORT);
967 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
968 "prop update failed");
969 return (DDI_FAILURE);
970 }
971 mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
972 &attached_devhdl);
973
974 mutex_enter(&mpt->m_mutex);
975 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
976 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
977 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
978 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
979 &pdev_hdl, &bay_num, &enclosure, &io_flags);
980 if (rval != DDI_SUCCESS) {
981 mptsas_log(mpt, CE_WARN,
982 "Failed to get device page0 for handle:%d",
983 attached_devhdl);
984 mutex_exit(&mpt->m_mutex);
985 return (DDI_FAILURE);
986 }
987
988 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
989 bzero(phymask, sizeof (phymask));
990 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
991 if (strcmp(phymask, iport) == 0) {
992 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
993 "%x",
994 mpt->m_phy_info[i].phy_mask);
995 }
996 }
997 mutex_exit(&mpt->m_mutex);
998
999 bzero(attached_wwnstr, sizeof (attached_wwnstr));
1000 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
1001 attached_sas_wwn);
1002 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
1003 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
1004 DDI_PROP_SUCCESS) {
1005 (void) ddi_prop_remove(DDI_DEV_T_NONE,
1006 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
1007 return (DDI_FAILURE);
1008 }
1009
1010 /* Create kstats for each phy on this iport */
1011
1012 mptsas_create_phy_stats(mpt, iport, dip);
1013
1014 /*
1015 * register sas hba iport with mdi (MPxIO/vhci)
1016 */
1017 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
1018 dip, 0) == MDI_SUCCESS) {
1019 mpt->m_mpxio_enable = TRUE;
1020 }
1021 return (DDI_SUCCESS);
1022 }
1023
1024 /*
1025 * Notes:
1026 * Set up all device state and allocate data structures,
1027 * mutexes, condition variables, etc. for device operation.
1028 * Add interrupts needed.
1029 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1030 */
1031 static int
1032 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1033 {
1034 mptsas_t *mpt = NULL;
1035 int instance, i, j;
1036 int doneq_thread_num;
1037 char intr_added = 0;
1038 char map_setup = 0;
1039 char config_setup = 0;
1040 char hba_attach_setup = 0;
1041 char smp_attach_setup = 0;
1042 char mutex_init_done = 0;
1043 char event_taskq_create = 0;
1044 char dr_taskq_create = 0;
1045 char doneq_thread_create = 0;
1046 char added_watchdog = 0;
1047 scsi_hba_tran_t *hba_tran;
1048 uint_t mem_bar = MEM_SPACE;
1049 int rval = DDI_FAILURE;
1050
1051 /* CONSTCOND */
1052 ASSERT(NO_COMPETING_THREADS);
1053
1054 if (scsi_hba_iport_unit_address(dip)) {
1055 return (mptsas_iport_attach(dip, cmd));
1056 }
1057
1058 switch (cmd) {
1059 case DDI_ATTACH:
1060 break;
1061
1062 case DDI_RESUME:
1063 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
1064 return (DDI_FAILURE);
1065
1066 mpt = TRAN2MPT(hba_tran);
1067
1068 if (!mpt) {
1069 return (DDI_FAILURE);
1070 }
1071
1072 /*
1073 * Reset hardware and softc to "no outstanding commands"
1074 * Note that a check condition can result on first command
1075 * to a target.
1076 */
1077 mutex_enter(&mpt->m_mutex);
1078
1079 /*
1080 * raise power.
1081 */
1082 if (mpt->m_options & MPTSAS_OPT_PM) {
1083 mutex_exit(&mpt->m_mutex);
1084 (void) pm_busy_component(dip, 0);
1085 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
1086 if (rval == DDI_SUCCESS) {
1087 mutex_enter(&mpt->m_mutex);
1088 } else {
1089 /*
1090 * The pm_raise_power() call above failed,
1091 * and that can only occur if we were unable
1092 * to reset the hardware. This is probably
1093 * due to unhealty hardware, and because
1094 * important filesystems(such as the root
1095 * filesystem) could be on the attached disks,
1096 * it would not be a good idea to continue,
1097 * as we won't be entirely certain we are
1098 * writing correct data. So we panic() here
1099 * to not only prevent possible data corruption,
1100 * but to give developers or end users a hope
1101 * of identifying and correcting any problems.
1102 */
1103 fm_panic("mptsas could not reset hardware "
1104 "during resume");
1105 }
1106 }
1107
1108 mpt->m_suspended = 0;
1109
1110 /*
1111 * Reinitialize ioc
1112 */
1113 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1114 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1115 mutex_exit(&mpt->m_mutex);
1116 if (mpt->m_options & MPTSAS_OPT_PM) {
1117 (void) pm_idle_component(dip, 0);
1118 }
1119 fm_panic("mptsas init chip fail during resume");
1120 }
1121 /*
1122 * mptsas_update_driver_data needs interrupts so enable them
1123 * first.
1124 */
1125 MPTSAS_ENABLE_INTR(mpt);
1126 mptsas_update_driver_data(mpt);
1127
1128 /* start requests, if possible */
1129 mptsas_restart_hba(mpt);
1130
1131 mutex_exit(&mpt->m_mutex);
1132
1133 /*
1134 * Restart watch thread
1135 */
1136 mutex_enter(&mptsas_global_mutex);
1137 if (mptsas_timeout_id == 0) {
1138 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1139 mptsas_tick);
1140 mptsas_timeouts_enabled = 1;
1141 }
1142 mutex_exit(&mptsas_global_mutex);
1143
1144 /* report idle status to pm framework */
1145 if (mpt->m_options & MPTSAS_OPT_PM) {
1146 (void) pm_idle_component(dip, 0);
1147 }
1148
1149 return (DDI_SUCCESS);
1150
1151 default:
1152 return (DDI_FAILURE);
1153
1154 }
1155
1156 instance = ddi_get_instance(dip);
1157
1158 /*
1159 * Allocate softc information.
1160 */
1161 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1162 mptsas_log(NULL, CE_WARN,
1163 "mptsas%d: cannot allocate soft state", instance);
1164 goto fail;
1165 }
1166
1167 mpt = ddi_get_soft_state(mptsas_state, instance);
1168
1169 if (mpt == NULL) {
1170 mptsas_log(NULL, CE_WARN,
1171 "mptsas%d: cannot get soft state", instance);
1172 goto fail;
1173 }
1174
1175 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1176 scsi_size_clean(dip);
1177
1178 mpt->m_dip = dip;
1179 mpt->m_instance = instance;
1180
1181 /* Make a per-instance copy of the structures */
1182 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1183 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1184 mpt->m_reg_acc_attr = mptsas_dev_attr;
1185 mpt->m_dev_acc_attr = mptsas_dev_attr;
1186
1187 /*
1188 * Initialize FMA
1189 */
1190 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1191 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1192 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1193 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1194
1195 mptsas_fm_init(mpt);
1196
1197 if (mptsas_alloc_handshake_msg(mpt,
1198 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1199 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1200 goto fail;
1201 }
1202
1203 /*
1204 * Setup configuration space
1205 */
1206 if (mptsas_config_space_init(mpt) == FALSE) {
1207 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1208 goto fail;
1209 }
1210 config_setup++;
1211
1212 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1213 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1214 mptsas_log(mpt, CE_WARN, "map setup failed");
1215 goto fail;
1216 }
1217 map_setup++;
1218
1219 /*
1220 * A taskq is created for dealing with the event handler
1221 */
1222 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1223 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1224 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1225 goto fail;
1226 }
1227 event_taskq_create++;
1228
1229 /*
1230 * A taskq is created for dealing with dr events
1231 */
1232 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1233 "mptsas_dr_taskq",
1234 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1235 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1236 "failed");
1237 goto fail;
1238 }
1239 dr_taskq_create++;
1240
1241 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1242 0, "mptsas_doneq_thread_threshold_prop", 10);
1243 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1244 0, "mptsas_doneq_length_threshold_prop", 8);
1245 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1246 0, "mptsas_doneq_thread_n_prop", 8);
1247
1248 if (mpt->m_doneq_thread_n) {
1249 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1250 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1251
1252 mutex_enter(&mpt->m_doneq_mutex);
1253 mpt->m_doneq_thread_id =
1254 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1255 * mpt->m_doneq_thread_n, KM_SLEEP);
1256
1257 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1258 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1259 CV_DRIVER, NULL);
1260 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1261 MUTEX_DRIVER, NULL);
1262 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1263 mpt->m_doneq_thread_id[j].flag |=
1264 MPTSAS_DONEQ_THREAD_ACTIVE;
1265 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1266 mpt->m_doneq_thread_id[j].arg.t = j;
1267 mpt->m_doneq_thread_id[j].threadp =
1268 thread_create(NULL, 0, mptsas_doneq_thread,
1269 &mpt->m_doneq_thread_id[j].arg,
1270 0, &p0, TS_RUN, minclsyspri);
1271 mpt->m_doneq_thread_id[j].donetail =
1272 &mpt->m_doneq_thread_id[j].doneq;
1273 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1274 }
1275 mutex_exit(&mpt->m_doneq_mutex);
1276 doneq_thread_create++;
1277 }
1278
1279 /*
1280 * Disable hardware interrupt since we're not ready to
1281 * handle it yet.
1282 */
1283 MPTSAS_DISABLE_INTR(mpt);
1284 if (mptsas_register_intrs(mpt) == FALSE)
1285 goto fail;
1286 intr_added++;
1287
1288 /* Initialize mutex used in interrupt handler */
1289 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1290 DDI_INTR_PRI(mpt->m_intr_pri));
1291 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1292 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1293 DDI_INTR_PRI(mpt->m_intr_pri));
1294 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1295 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1296 NULL, MUTEX_DRIVER,
1297 DDI_INTR_PRI(mpt->m_intr_pri));
1298 }
1299
1300 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1301 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1302 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1303 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1304 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1305 mutex_init_done++;
1306
1307 mutex_enter(&mpt->m_mutex);
1308 /*
1309 * Initialize power management component
1310 */
1311 if (mpt->m_options & MPTSAS_OPT_PM) {
1312 if (mptsas_init_pm(mpt)) {
1313 mutex_exit(&mpt->m_mutex);
1314 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1315 "failed");
1316 goto fail;
1317 }
1318 }
1319
1320 /*
1321 * Initialize chip using Message Unit Reset, if allowed
1322 */
1323 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1324 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1325 mutex_exit(&mpt->m_mutex);
1326 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1327 goto fail;
1328 }
1329
1330 mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
1331 mptsas_target_addr_hash, mptsas_target_addr_cmp,
1332 mptsas_target_free, sizeof (mptsas_target_t),
1333 offsetof(mptsas_target_t, m_link),
1334 offsetof(mptsas_target_t, m_addr), KM_SLEEP);
1335
1336 /*
1337 * Fill in the phy_info structure and get the base WWID
1338 */
1339 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1340 mptsas_log(mpt, CE_WARN,
1341 "mptsas_get_manufacture_page5 failed!");
1342 goto fail;
1343 }
1344
1345 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1346 mptsas_log(mpt, CE_WARN,
1347 "mptsas_get_sas_io_unit_page_hndshk failed!");
1348 goto fail;
1349 }
1350
1351 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1352 mptsas_log(mpt, CE_WARN,
1353 "mptsas_get_manufacture_page0 failed!");
1354 goto fail;
1355 }
1356
1357 mutex_exit(&mpt->m_mutex);
1358
1359 /*
1360 * Register the iport for multiple port HBA
1361 */
1362 mptsas_iport_register(mpt);
1363
1364 /*
1365 * initialize SCSI HBA transport structure
1366 */
1367 if (mptsas_hba_setup(mpt) == FALSE)
1368 goto fail;
1369 hba_attach_setup++;
1370
1371 if (mptsas_smp_setup(mpt) == FALSE)
1372 goto fail;
1373 smp_attach_setup++;
1374
1375 if (mptsas_cache_create(mpt) == FALSE)
1376 goto fail;
1377
1378 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1379 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1380 if (mpt->m_scsi_reset_delay == 0) {
1381 mptsas_log(mpt, CE_NOTE,
1382 "scsi_reset_delay of 0 is not recommended,"
1383 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1384 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1385 }
1386
1387 /*
1388 * Initialize the wait and done FIFO queue
1389 */
1390 mpt->m_donetail = &mpt->m_doneq;
1391 mpt->m_waitqtail = &mpt->m_waitq;
1392 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1393 mpt->m_tx_draining = 0;
1394
1395 /*
1396 * ioc cmd queue initialize
1397 */
1398 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1399 mpt->m_dev_handle = 0xFFFF;
1400
1401 MPTSAS_ENABLE_INTR(mpt);
1402
1403 /*
1404 * enable event notification
1405 */
1406 mutex_enter(&mpt->m_mutex);
1407 if (mptsas_ioc_enable_event_notification(mpt)) {
1408 mutex_exit(&mpt->m_mutex);
1409 goto fail;
1410 }
1411 mutex_exit(&mpt->m_mutex);
1412
1413 /*
1414 * used for mptsas_watch
1415 */
1416 mptsas_list_add(mpt);
1417
1418 mutex_enter(&mptsas_global_mutex);
1419 if (mptsas_timeouts_enabled == 0) {
1420 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1421 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1422
1423 mptsas_tick = mptsas_scsi_watchdog_tick *
1424 drv_usectohz((clock_t)1000000);
1425
1426 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1427 mptsas_timeouts_enabled = 1;
1428 }
1429 mutex_exit(&mptsas_global_mutex);
1430 added_watchdog++;
1431
1432 /*
1433 * Initialize PHY info for smhba.
1434 * This requires watchdog to be enabled otherwise if interrupts
1435 * don't work the system will hang.
1436 */
1437 if (mptsas_smhba_setup(mpt)) {
1438 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1439 "failed");
1440 goto fail;
1441 }
1442
1443 /* Check all dma handles allocated in attach */
1444 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1445 != DDI_SUCCESS) ||
1446 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1447 != DDI_SUCCESS) ||
1448 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1449 != DDI_SUCCESS) ||
1450 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1451 != DDI_SUCCESS) ||
1452 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1453 != DDI_SUCCESS)) {
1454 goto fail;
1455 }
1456
1457 /* Check all acc handles allocated in attach */
1458 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1459 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1460 != DDI_SUCCESS) ||
1461 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1462 != DDI_SUCCESS) ||
1463 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1464 != DDI_SUCCESS) ||
1465 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1466 != DDI_SUCCESS) ||
1467 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1468 != DDI_SUCCESS) ||
1469 (mptsas_check_acc_handle(mpt->m_config_handle)
1470 != DDI_SUCCESS)) {
1471 goto fail;
1472 }
1473
1474 /*
1475 * After this point, we are not going to fail the attach.
1476 */
1477
1478 /* Print message of HBA present */
1479 ddi_report_dev(dip);
1480
1481 /* report idle status to pm framework */
1482 if (mpt->m_options & MPTSAS_OPT_PM) {
1483 (void) pm_idle_component(dip, 0);
1484 }
1485
1486 return (DDI_SUCCESS);
1487
1488 fail:
1489 mptsas_log(mpt, CE_WARN, "attach failed");
1490 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1491 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1492 if (mpt) {
1493 /* deallocate in reverse order */
1494 if (added_watchdog) {
1495 mptsas_list_del(mpt);
1496 mutex_enter(&mptsas_global_mutex);
1497
1498 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1499 timeout_id_t tid = mptsas_timeout_id;
1500 mptsas_timeouts_enabled = 0;
1501 mptsas_timeout_id = 0;
1502 mutex_exit(&mptsas_global_mutex);
1503 (void) untimeout(tid);
1504 mutex_enter(&mptsas_global_mutex);
1505 }
1506 mutex_exit(&mptsas_global_mutex);
1507 }
1508
1509 mptsas_cache_destroy(mpt);
1510
1511 if (smp_attach_setup) {
1512 mptsas_smp_teardown(mpt);
1513 }
1514 if (hba_attach_setup) {
1515 mptsas_hba_teardown(mpt);
1516 }
1517
1518 if (mpt->m_targets)
1519 refhash_destroy(mpt->m_targets);
1520 if (mpt->m_smp_targets)
1521 refhash_destroy(mpt->m_smp_targets);
1522
1523 if (mpt->m_active) {
1524 mptsas_free_active_slots(mpt);
1525 }
1526 if (intr_added) {
1527 mptsas_unregister_intrs(mpt);
1528 }
1529
1530 if (doneq_thread_create) {
1531 mutex_enter(&mpt->m_doneq_mutex);
1532 doneq_thread_num = mpt->m_doneq_thread_n;
1533 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1534 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1535 mpt->m_doneq_thread_id[j].flag &=
1536 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1537 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1538 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1539 }
1540 while (mpt->m_doneq_thread_n) {
1541 cv_wait(&mpt->m_doneq_thread_cv,
1542 &mpt->m_doneq_mutex);
1543 }
1544 for (j = 0; j < doneq_thread_num; j++) {
1545 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1546 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1547 }
1548 kmem_free(mpt->m_doneq_thread_id,
1549 sizeof (mptsas_doneq_thread_list_t)
1550 * doneq_thread_num);
1551 mutex_exit(&mpt->m_doneq_mutex);
1552 cv_destroy(&mpt->m_doneq_thread_cv);
1553 mutex_destroy(&mpt->m_doneq_mutex);
1554 }
1555 if (event_taskq_create) {
1556 ddi_taskq_destroy(mpt->m_event_taskq);
1557 }
1558 if (dr_taskq_create) {
1559 ddi_taskq_destroy(mpt->m_dr_taskq);
1560 }
1561 if (mutex_init_done) {
1562 mutex_destroy(&mpt->m_tx_waitq_mutex);
1563 mutex_destroy(&mpt->m_passthru_mutex);
1564 mutex_destroy(&mpt->m_mutex);
1565 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1566 mutex_destroy(
1567 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1568 }
1569 cv_destroy(&mpt->m_cv);
1570 cv_destroy(&mpt->m_passthru_cv);
1571 cv_destroy(&mpt->m_fw_cv);
1572 cv_destroy(&mpt->m_config_cv);
1573 cv_destroy(&mpt->m_fw_diag_cv);
1574 }
1575
1576 if (map_setup) {
1577 mptsas_cfg_fini(mpt);
1578 }
1579 if (config_setup) {
1580 mptsas_config_space_fini(mpt);
1581 }
1582 mptsas_free_handshake_msg(mpt);
1583 mptsas_hba_fini(mpt);
1584
1585 mptsas_fm_fini(mpt);
1586 ddi_soft_state_free(mptsas_state, instance);
1587 ddi_prop_remove_all(dip);
1588 }
1589 return (DDI_FAILURE);
1590 }
1591
/*
 * DDI_SUSPEND handler for a single HBA instance.
 *
 * Cancels this instance's pending timeouts, stops the driver-global
 * watch/reset timeouts once every instance is suspended, disables HBA
 * interrupts, syncs IR metadata via a RAID action system shutdown, and
 * drains the event/dr taskqs.  Suspend of an iport node or an
 * unattached node is a no-op.  Always returns DDI_SUCCESS.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* Iport nodes have no per-instance hardware state to quiesce. */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* Nested suspend: just count it and return. */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		/* drop m_mutex across untimeout() so the callback can run */
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	/* g == NULL means every instance on the global list is suspended */
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1698
1699 #ifdef __sparc
1700 /*ARGSUSED*/
1701 static int
1702 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1703 {
1704 mptsas_t *mpt;
1705 scsi_hba_tran_t *tran;
1706
1707 /*
1708 * If this call is for iport, just return.
1709 */
1710 if (scsi_hba_iport_unit_address(devi))
1711 return (DDI_SUCCESS);
1712
1713 if ((tran = ddi_get_driver_private(devi)) == NULL)
1714 return (DDI_SUCCESS);
1715
1716 if ((mpt = TRAN2MPT(tran)) == NULL)
1717 return (DDI_SUCCESS);
1718
1719 /*
1720 * Send RAID action system shutdown to sync IR. Disable HBA
1721 * interrupts in hardware first.
1722 */
1723 MPTSAS_DISABLE_INTR(mpt);
1724 mptsas_raid_action_system_shutdown(mpt);
1725
1726 return (DDI_SUCCESS);
1727 }
1728 #else /* __sparc */
1729 /*
1730 * quiesce(9E) entry point.
1731 *
1732 * This function is called when the system is single-threaded at high
1733 * PIL with preemption disabled. Therefore, this function must not be
1734 * blocked.
1735 *
1736 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1737 * DDI_FAILURE indicates an error condition and should almost never happen.
1738 */
static int
mptsas_quiesce(dev_info_t *devi)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;

	/*
	 * If this call is for iport, just return.
	 */
	if (scsi_hba_iport_unit_address(devi))
		return (DDI_SUCCESS);

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	if ((mpt = TRAN2MPT(tran)) == NULL)
		return (DDI_SUCCESS);

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/* Send RAID action system shutdown to sync IR */
	mptsas_raid_action_system_shutdown(mpt);

	return (DDI_SUCCESS);
}
1764 #endif /* __sparc */
1765
1766 /*
1767 * detach(9E). Remove all device allocations and system resources;
1768 * disable device interrupts.
1769 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1770 */
1771 static int
1772 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1773 {
1774 /* CONSTCOND */
1775 ASSERT(NO_COMPETING_THREADS);
1776 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1777
1778 switch (cmd) {
1779 case DDI_DETACH:
1780 return (mptsas_do_detach(devi));
1781
1782 case DDI_SUSPEND:
1783 return (mptsas_suspend(devi));
1784
1785 default:
1786 return (DDI_FAILURE);
1787 }
1788 /* NOTREACHED */
1789 }
1790
/*
 * DDI_DETACH worker: tear down one HBA (or iport) instance.
 *
 * For an iport node: free all mdi pathinfo children (failing detach if
 * any cannot be freed), unregister from MPxIO, and remove properties.
 * For the HBA node: raise power to D0 if needed, quiesce the chip
 * (RAID action shutdown + message unit reset) BEFORE freeing DMA
 * resources so the IOC cannot DMA into freed memory, then release
 * everything mptsas_attach() allocated, in reverse order.
 * Returns DDI_SUCCESS when fully detached, DDI_FAILURE otherwise.
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* free every client path; any survivor blocks detach */
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR.  After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	if (mpt->m_doneq_thread_n) {
		/*
		 * Signal each doneq worker to exit, then wait for the
		 * thread count to drain to zero before freeing the list.
		 */
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		/* drop m_mutex across untimeout() so the callback can run */
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	mptsas_destroy_hashes(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
2006
2007 static void
2008 mptsas_list_add(mptsas_t *mpt)
2009 {
2010 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2011
2012 if (mptsas_head == NULL) {
2013 mptsas_head = mpt;
2014 } else {
2015 mptsas_tail->m_next = mpt;
2016 }
2017 mptsas_tail = mpt;
2018 rw_exit(&mptsas_global_rwlock);
2019 }
2020
2021 static void
2022 mptsas_list_del(mptsas_t *mpt)
2023 {
2024 mptsas_t *m;
2025 /*
2026 * Remove device instance from the global linked list
2027 */
2028 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2029 if (mptsas_head == mpt) {
2030 m = mptsas_head = mpt->m_next;
2031 } else {
2032 for (m = mptsas_head; m != NULL; m = m->m_next) {
2033 if (m->m_next == mpt) {
2034 m->m_next = mpt->m_next;
2035 break;
2036 }
2037 }
2038 if (m == NULL) {
2039 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
2040 }
2041 }
2042
2043 if (mptsas_tail == mpt) {
2044 mptsas_tail = m;
2045 }
2046 rw_exit(&mptsas_global_rwlock);
2047 }
2048
2049 static int
2050 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2051 {
2052 ddi_dma_attr_t task_dma_attrs;
2053
2054 mpt->m_hshk_dma_size = 0;
2055 task_dma_attrs = mpt->m_msg_dma_attr;
2056 task_dma_attrs.dma_attr_sgllen = 1;
2057 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2058
2059 /* allocate Task Management ddi_dma resources */
2060 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
2061 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
2062 alloc_size, NULL) == FALSE) {
2063 return (DDI_FAILURE);
2064 }
2065 mpt->m_hshk_dma_size = alloc_size;
2066
2067 return (DDI_SUCCESS);
2068 }
2069
2070 static void
2071 mptsas_free_handshake_msg(mptsas_t *mpt)
2072 {
2073 if (mpt->m_hshk_dma_size == 0)
2074 return;
2075 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
2076 mpt->m_hshk_dma_size = 0;
2077 }
2078
2079 static int
2080 mptsas_hba_setup(mptsas_t *mpt)
2081 {
2082 scsi_hba_tran_t *hba_tran;
2083 int tran_flags;
2084
2085 /* Allocate a transport structure */
2086 hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
2087 SCSI_HBA_CANSLEEP);
2088 ASSERT(mpt->m_tran != NULL);
2089
2090 hba_tran->tran_hba_private = mpt;
2091 hba_tran->tran_tgt_private = NULL;
2092
2093 hba_tran->tran_tgt_init = mptsas_scsi_tgt_init;
2094 hba_tran->tran_tgt_free = mptsas_scsi_tgt_free;
2095
2096 hba_tran->tran_start = mptsas_scsi_start;
2097 hba_tran->tran_reset = mptsas_scsi_reset;
2098 hba_tran->tran_abort = mptsas_scsi_abort;
2099 hba_tran->tran_getcap = mptsas_scsi_getcap;
2100 hba_tran->tran_setcap = mptsas_scsi_setcap;
2101 hba_tran->tran_init_pkt = mptsas_scsi_init_pkt;
2102 hba_tran->tran_destroy_pkt = mptsas_scsi_destroy_pkt;
2103
2104 hba_tran->tran_dmafree = mptsas_scsi_dmafree;
2105 hba_tran->tran_sync_pkt = mptsas_scsi_sync_pkt;
2106 hba_tran->tran_reset_notify = mptsas_scsi_reset_notify;
2107
2108 hba_tran->tran_get_bus_addr = mptsas_get_bus_addr;
2109 hba_tran->tran_get_name = mptsas_get_name;
2110
2111 hba_tran->tran_quiesce = mptsas_scsi_quiesce;
2112 hba_tran->tran_unquiesce = mptsas_scsi_unquiesce;
2113 hba_tran->tran_bus_reset = NULL;
2114
2115 hba_tran->tran_add_eventcall = NULL;
2116 hba_tran->tran_get_eventcookie = NULL;
2117 hba_tran->tran_post_event = NULL;
2118 hba_tran->tran_remove_eventcall = NULL;
2119
2120 hba_tran->tran_bus_config = mptsas_bus_config;
2121
2122 hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
2123
2124 /*
2125 * All children of the HBA are iports. We need tran was cloned.
2126 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
2127 * inherited to iport's tran vector.
2128 */
2129 tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);
2130
2131 if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
2132 hba_tran, tran_flags) != DDI_SUCCESS) {
2133 mptsas_log(mpt, CE_WARN, "hba attach setup failed");
2134 scsi_hba_tran_free(hba_tran);
2135 mpt->m_tran = NULL;
2136 return (FALSE);
2137 }
2138 return (TRUE);
2139 }
2140
2141 static void
2142 mptsas_hba_teardown(mptsas_t *mpt)
2143 {
2144 (void) scsi_hba_detach(mpt->m_dip);
2145 if (mpt->m_tran != NULL) {
2146 scsi_hba_tran_free(mpt->m_tran);
2147 mpt->m_tran = NULL;
2148 }
2149 }
2150
/*
 * Register one SCSA iport per physical SAS port, plus the fixed
 * virtual iport "v0" used for RAID volumes.
 *
 * PHYs that share a port_num are grouped into a bitmask of PHY
 * numbers (the "phymask"); that mask, printed in hex, becomes the
 * iport unit address.  Only ports with at least one attached device
 * get a physical iport.
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int i, j;
	mptsas_phymask_t mask = 0x0;
	/*
	 * initial value of mask is 0
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* nothing attached to this PHY: skip it */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* PHY already covered by a previously-built port mask */
		if ((mask & (1 << i)) != 0)
			continue;

		/* collect every attached PHY belonging to this port */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		/* record the shared phymask in each member PHY */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		/* the iport unit address is the phymask in hex */
		(void) sprintf(phy_mask_name, "%x", phy_mask);

		/*
		 * NOTE(review): m_mutex is dropped around the DDI call,
		 * presumably because scsi_hba_iport_register() may block
		 * or re-enter the driver -- TODO confirm.
		 */
		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2205
/*
 * Allocate and attach the SMP (SAS expander) transport for this
 * instance and create the SMP target refhash.  Returns TRUE on
 * success; on failure the SMP tran is freed, m_smptran is reset, and
 * FALSE is returned.
 */
static int
mptsas_smp_setup(mptsas_t *mpt)
{
	mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
	ASSERT(mpt->m_smptran != NULL);
	mpt->m_smptran->smp_tran_hba_private = mpt;
	/* all SMP requests funnel through mptsas_smp_start() */
	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
	if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
		smp_hba_tran_free(mpt->m_smptran);
		mpt->m_smptran = NULL;
		return (FALSE);
	}
	/*
	 * Initialize smp hash table, keyed by target address
	 * (wwn + phymask); mptsas_smp_free destroys evicted entries.
	 */
	mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_smp_free, sizeof (mptsas_smp_t),
	    offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
	    KM_SLEEP);
	/* 0xFFFF: no valid SMP device handle discovered yet */
	mpt->m_smp_devhdl = 0xFFFF;

	return (TRUE);
}
2231
2232 static void
2233 mptsas_smp_teardown(mptsas_t *mpt)
2234 {
2235 (void) smp_hba_detach(mpt->m_dip);
2236 if (mpt->m_smptran != NULL) {
2237 smp_hba_tran_free(mpt->m_smptran);
2238 mpt->m_smptran = NULL;
2239 }
2240 mpt->m_smp_devhdl = 0;
2241 }
2242
2243 static int
2244 mptsas_cache_create(mptsas_t *mpt)
2245 {
2246 int instance = mpt->m_instance;
2247 char buf[64];
2248
2249 /*
2250 * create kmem cache for packets
2251 */
2252 (void) sprintf(buf, "mptsas%d_cache", instance);
2253 mpt->m_kmem_cache = kmem_cache_create(buf,
2254 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2255 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2256 NULL, (void *)mpt, NULL, 0);
2257
2258 if (mpt->m_kmem_cache == NULL) {
2259 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2260 return (FALSE);
2261 }
2262
2263 /*
2264 * create kmem cache for extra SGL frames if SGL cannot
2265 * be accomodated into main request frame.
2266 */
2267 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2268 mpt->m_cache_frames = kmem_cache_create(buf,
2269 sizeof (mptsas_cache_frames_t), 8,
2270 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2271 NULL, (void *)mpt, NULL, 0);
2272
2273 if (mpt->m_cache_frames == NULL) {
2274 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2275 return (FALSE);
2276 }
2277
2278 return (TRUE);
2279 }
2280
2281 static void
2282 mptsas_cache_destroy(mptsas_t *mpt)
2283 {
2284 /* deallocate in reverse order */
2285 if (mpt->m_cache_frames) {
2286 kmem_cache_destroy(mpt->m_cache_frames);
2287 mpt->m_cache_frames = NULL;
2288 }
2289 if (mpt->m_kmem_cache) {
2290 kmem_cache_destroy(mpt->m_kmem_cache);
2291 mpt->m_kmem_cache = NULL;
2292 }
2293 }
2294
/*
 * power(9e) entry point.
 *
 * Raises (PM_LEVEL_D0) or lowers (PM_LEVEL_D3) the power level of the
 * HBA.  Requests on iport children are accepted without action; only
 * the physical HBA node is handled.  Lowering is refused while the
 * device is busy.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t *mpt;
	int rval = DDI_SUCCESS;
	int polls = 0;
	uint32_t ioc_status;

	/* iport nodes have a unit address; nothing to power-manage */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset
		 * (3000 polls of 10ms each, reading the Doorbell state).
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			/* force a full reset, not just a message-unit reset */
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2366
2367 /*
2368 * Initialize configuration space and figure out which
2369 * chip and revison of the chip the mpt driver is using.
2370 */
2371 static int
2372 mptsas_config_space_init(mptsas_t *mpt)
2373 {
2374 NDBG0(("mptsas_config_space_init"));
2375
2376 if (mpt->m_config_handle != NULL)
2377 return (TRUE);
2378
2379 if (pci_config_setup(mpt->m_dip,
2380 &mpt->m_config_handle) != DDI_SUCCESS) {
2381 mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
2382 return (FALSE);
2383 }
2384
2385 /*
2386 * This is a workaround for a XMITS ASIC bug which does not
2387 * drive the CBE upper bits.
2388 */
2389 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
2390 PCI_STAT_PERROR) {
2391 pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
2392 PCI_STAT_PERROR);
2393 }
2394
2395 mptsas_setup_cmd_reg(mpt);
2396
2397 /*
2398 * Get the chip device id:
2399 */
2400 mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);
2401
2402 /*
2403 * Save the revision.
2404 */
2405 mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);
2406
2407 /*
2408 * Save the SubSystem Vendor and Device IDs
2409 */
2410 mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
2411 mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);
2412
2413 /*
2414 * Set the latency timer to 0x40 as specified by the upa -> pci
2415 * bridge chip design team. This may be done by the sparc pci
2416 * bus nexus driver, but the driver should make sure the latency
2417 * timer is correct for performance reasons.
2418 */
2419 pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
2420 MPTSAS_LATENCY_TIMER);
2421
2422 (void) mptsas_get_pci_cap(mpt);
2423 return (TRUE);
2424 }
2425
2426 static void
2427 mptsas_config_space_fini(mptsas_t *mpt)
2428 {
2429 if (mpt->m_config_handle != NULL) {
2430 mptsas_disable_bus_master(mpt);
2431 pci_config_teardown(&mpt->m_config_handle);
2432 mpt->m_config_handle = NULL;
2433 }
2434 }
2435
2436 static void
2437 mptsas_setup_cmd_reg(mptsas_t *mpt)
2438 {
2439 ushort_t cmdreg;
2440
2441 /*
2442 * Set the command register to the needed values.
2443 */
2444 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2445 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2446 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2447 cmdreg &= ~PCI_COMM_IO;
2448 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2449 }
2450
2451 static void
2452 mptsas_disable_bus_master(mptsas_t *mpt)
2453 {
2454 ushort_t cmdreg;
2455
2456 /*
2457 * Clear the master enable bit in the PCI command register.
2458 * This prevents any bus mastering activity like DMA.
2459 */
2460 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2461 cmdreg &= ~PCI_COMM_ME;
2462 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2463 }
2464
2465 int
2466 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2467 {
2468 ddi_dma_attr_t attrs;
2469
2470 attrs = mpt->m_io_dma_attr;
2471 attrs.dma_attr_sgllen = 1;
2472
2473 ASSERT(dma_statep != NULL);
2474
2475 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2476 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2477 &dma_statep->cookie) == FALSE) {
2478 return (DDI_FAILURE);
2479 }
2480
2481 return (DDI_SUCCESS);
2482 }
2483
/*
 * Release the DMA resources described by dma_statep (allocated by
 * mptsas_dma_alloc()) and reset its size to 0.
 */
void
mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
{
	ASSERT(dma_statep != NULL);
	mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
	dma_statep->size = 0;
}
2491
2492 int
2493 mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
2494 {
2495 ddi_dma_attr_t attrs;
2496 ddi_dma_handle_t dma_handle;
2497 caddr_t memp;
2498 ddi_acc_handle_t accessp;
2499 int rval;
2500
2501 ASSERT(mutex_owned(&mpt->m_mutex));
2502
2503 attrs = mpt->m_msg_dma_attr;
2504 attrs.dma_attr_sgllen = 1;
2505 attrs.dma_attr_granular = size;
2506
2507 if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
2508 &accessp, &memp, size, NULL) == FALSE) {
2509 return (DDI_FAILURE);
2510 }
2511
2512 rval = (*callback) (mpt, memp, var, accessp);
2513
2514 if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
2515 (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
2516 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
2517 rval = DDI_FAILURE;
2518 }
2519
2520 mptsas_dma_addr_destroy(&dma_handle, &accessp);
2521 return (rval);
2522
2523 }
2524
2525 static int
2526 mptsas_alloc_request_frames(mptsas_t *mpt)
2527 {
2528 ddi_dma_attr_t frame_dma_attrs;
2529 caddr_t memp;
2530 ddi_dma_cookie_t cookie;
2531 size_t mem_size;
2532
2533 /*
2534 * re-alloc when it has already alloced
2535 */
2536 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2537 &mpt->m_acc_req_frame_hdl);
2538
2539 /*
2540 * The size of the request frame pool is:
2541 * Number of Request Frames * Request Frame Size
2542 */
2543 mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2544
2545 /*
2546 * set the DMA attributes. System Request Message Frames must be
2547 * aligned on a 16-byte boundry.
2548 */
2549 frame_dma_attrs = mpt->m_msg_dma_attr;
2550 frame_dma_attrs.dma_attr_align = 16;
2551 frame_dma_attrs.dma_attr_sgllen = 1;
2552
2553 /*
2554 * allocate the request frame pool.
2555 */
2556 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2557 &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2558 mem_size, &cookie) == FALSE) {
2559 return (DDI_FAILURE);
2560 }
2561
2562 /*
2563 * Store the request frame memory address. This chip uses this
2564 * address to dma to and from the driver's frame. The second
2565 * address is the address mpt uses to fill in the frame.
2566 */
2567 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2568 mpt->m_req_frame = memp;
2569
2570 /*
2571 * Clear the request frame pool.
2572 */
2573 bzero(mpt->m_req_frame, mem_size);
2574
2575 return (DDI_SUCCESS);
2576 }
2577
2578 static int
2579 mptsas_alloc_reply_frames(mptsas_t *mpt)
2580 {
2581 ddi_dma_attr_t frame_dma_attrs;
2582 caddr_t memp;
2583 ddi_dma_cookie_t cookie;
2584 size_t mem_size;
2585
2586 /*
2587 * re-alloc when it has already alloced
2588 */
2589 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2590 &mpt->m_acc_reply_frame_hdl);
2591
2592 /*
2593 * The size of the reply frame pool is:
2594 * Number of Reply Frames * Reply Frame Size
2595 */
2596 mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2597
2598 /*
2599 * set the DMA attributes. System Reply Message Frames must be
2600 * aligned on a 4-byte boundry. This is the default.
2601 */
2602 frame_dma_attrs = mpt->m_msg_dma_attr;
2603 frame_dma_attrs.dma_attr_sgllen = 1;
2604
2605 /*
2606 * allocate the reply frame pool
2607 */
2608 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2609 &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2610 mem_size, &cookie) == FALSE) {
2611 return (DDI_FAILURE);
2612 }
2613
2614 /*
2615 * Store the reply frame memory address. This chip uses this
2616 * address to dma to and from the driver's frame. The second
2617 * address is the address mpt uses to process the frame.
2618 */
2619 mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2620 mpt->m_reply_frame = memp;
2621
2622 /*
2623 * Clear the reply frame pool.
2624 */
2625 bzero(mpt->m_reply_frame, mem_size);
2626
2627 return (DDI_SUCCESS);
2628 }
2629
2630 static int
2631 mptsas_alloc_free_queue(mptsas_t *mpt)
2632 {
2633 ddi_dma_attr_t frame_dma_attrs;
2634 caddr_t memp;
2635 ddi_dma_cookie_t cookie;
2636 size_t mem_size;
2637
2638 /*
2639 * re-alloc when it has already alloced
2640 */
2641 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2642 &mpt->m_acc_free_queue_hdl);
2643
2644 /*
2645 * The reply free queue size is:
2646 * Reply Free Queue Depth * 4
2647 * The "4" is the size of one 32 bit address (low part of 64-bit
2648 * address)
2649 */
2650 mem_size = mpt->m_free_queue_depth * 4;
2651
2652 /*
2653 * set the DMA attributes The Reply Free Queue must be aligned on a
2654 * 16-byte boundry.
2655 */
2656 frame_dma_attrs = mpt->m_msg_dma_attr;
2657 frame_dma_attrs.dma_attr_align = 16;
2658 frame_dma_attrs.dma_attr_sgllen = 1;
2659
2660 /*
2661 * allocate the reply free queue
2662 */
2663 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2664 &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2665 mem_size, &cookie) == FALSE) {
2666 return (DDI_FAILURE);
2667 }
2668
2669 /*
2670 * Store the reply free queue memory address. This chip uses this
2671 * address to read from the reply free queue. The second address
2672 * is the address mpt uses to manage the queue.
2673 */
2674 mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2675 mpt->m_free_queue = memp;
2676
2677 /*
2678 * Clear the reply free queue memory.
2679 */
2680 bzero(mpt->m_free_queue, mem_size);
2681
2682 return (DDI_SUCCESS);
2683 }
2684
2685 static int
2686 mptsas_alloc_post_queue(mptsas_t *mpt)
2687 {
2688 ddi_dma_attr_t frame_dma_attrs;
2689 caddr_t memp;
2690 ddi_dma_cookie_t cookie;
2691 size_t mem_size;
2692
2693 /*
2694 * re-alloc when it has already alloced
2695 */
2696 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2697 &mpt->m_acc_post_queue_hdl);
2698
2699 /*
2700 * The reply descriptor post queue size is:
2701 * Reply Descriptor Post Queue Depth * 8
2702 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2703 */
2704 mem_size = mpt->m_post_queue_depth * 8;
2705
2706 /*
2707 * set the DMA attributes. The Reply Descriptor Post Queue must be
2708 * aligned on a 16-byte boundry.
2709 */
2710 frame_dma_attrs = mpt->m_msg_dma_attr;
2711 frame_dma_attrs.dma_attr_align = 16;
2712 frame_dma_attrs.dma_attr_sgllen = 1;
2713
2714 /*
2715 * allocate the reply post queue
2716 */
2717 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2718 &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2719 mem_size, &cookie) == FALSE) {
2720 return (DDI_FAILURE);
2721 }
2722
2723 /*
2724 * Store the reply descriptor post queue memory address. This chip
2725 * uses this address to write to the reply descriptor post queue. The
2726 * second address is the address mpt uses to manage the queue.
2727 */
2728 mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2729 mpt->m_post_queue = memp;
2730
2731 /*
2732 * Clear the reply post queue memory.
2733 */
2734 bzero(mpt->m_post_queue, mem_size);
2735
2736 return (DDI_SUCCESS);
2737 }
2738
2739 static void
2740 mptsas_alloc_reply_args(mptsas_t *mpt)
2741 {
2742 if (mpt->m_replyh_args == NULL) {
2743 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2744 mpt->m_max_replies, KM_SLEEP);
2745 }
2746 }
2747
2748 static int
2749 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2750 {
2751 mptsas_cache_frames_t *frames = NULL;
2752 if (cmd->cmd_extra_frames == NULL) {
2753 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2754 if (frames == NULL) {
2755 return (DDI_FAILURE);
2756 }
2757 cmd->cmd_extra_frames = frames;
2758 }
2759 return (DDI_SUCCESS);
2760 }
2761
2762 static void
2763 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2764 {
2765 if (cmd->cmd_extra_frames) {
2766 kmem_cache_free(mpt->m_cache_frames,
2767 (void *)cmd->cmd_extra_frames);
2768 cmd->cmd_extra_frames = NULL;
2769 }
2770 }
2771
/*
 * Unmap the device register mapping (m_datap) set up during
 * configuration.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2778
/*
 * Free the DMA pools and reply-handler argument array used for HBA
 * operation; counterpart of the mptsas_alloc_* routines above.
 */
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Free up any allocated memory
	 */
	/* request frame pool */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	/* reply frame pool */
	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	/* reply free queue */
	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	/* reply descriptor post queue */
	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	/* array sized by mptsas_alloc_reply_args() */
	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}
2804
2805 static int
2806 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
2807 {
2808 int lun = 0;
2809 char *sas_wwn = NULL;
2810 int phynum = -1;
2811 int reallen = 0;
2812
2813 /* Get the target num */
2814 lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2815 LUN_PROP, 0);
2816
2817 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
2818 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
2819 /*
2820 * Stick in the address of form "pPHY,LUN"
2821 */
2822 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
2823 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
2824 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
2825 == DDI_PROP_SUCCESS) {
2826 /*
2827 * Stick in the address of the form "wWWN,LUN"
2828 */
2829 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
2830 ddi_prop_free(sas_wwn);
2831 } else {
2832 return (DDI_FAILURE);
2833 }
2834
2835 ASSERT(reallen < len);
2836 if (reallen >= len) {
2837 mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
2838 "length too small, it needs to be %d bytes", reallen + 1);
2839 }
2840 return (DDI_SUCCESS);
2841 }
2842
2843 /*
2844 * tran_tgt_init(9E) - target device instance initialization
2845 */
2846 static int
2847 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2848 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2849 {
2850 #ifndef __lock_lint
2851 _NOTE(ARGUNUSED(hba_tran))
2852 #endif
2853
2854 /*
2855 * At this point, the scsi_device structure already exists
2856 * and has been initialized.
2857 *
2858 * Use this function to allocate target-private data structures,
2859 * if needed by this HBA. Add revised flow-control and queue
2860 * properties for child here, if desired and if you can tell they
2861 * support tagged queueing by now.
2862 */
2863 mptsas_t *mpt;
2864 int lun = sd->sd_address.a_lun;
2865 mdi_pathinfo_t *pip = NULL;
2866 mptsas_tgt_private_t *tgt_private = NULL;
2867 mptsas_target_t *ptgt = NULL;
2868 char *psas_wwn = NULL;
2869 mptsas_phymask_t phymask = 0;
2870 uint64_t sas_wwn = 0;
2871 mptsas_target_addr_t addr;
2872 mpt = SDEV2MPT(sd);
2873
2874 ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
2875
2876 NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
2877 (void *)hba_dip, (void *)tgt_dip, lun));
2878
2879 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
2880 (void) ndi_merge_node(tgt_dip, mptsas_name_child);
2881 ddi_set_name_addr(tgt_dip, NULL);
2882 return (DDI_FAILURE);
2883 }
2884 /*
2885 * phymask is 0 means the virtual port for RAID
2886 */
2887 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
2888 "phymask", 0);
2889 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
2890 if ((pip = (void *)(sd->sd_private)) == NULL) {
2891 /*
2892 * Very bad news if this occurs. Somehow scsi_vhci has
2893 * lost the pathinfo node for this target.
2894 */
2895 return (DDI_NOT_WELL_FORMED);
2896 }
2897
2898 if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
2899 DDI_PROP_SUCCESS) {
2900 mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
2901 return (DDI_FAILURE);
2902 }
2903
2904 if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
2905 &psas_wwn) == MDI_SUCCESS) {
2906 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
2907 sas_wwn = 0;
2908 }
2909 (void) mdi_prop_free(psas_wwn);
2910 }
2911 } else {
2912 lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
2913 DDI_PROP_DONTPASS, LUN_PROP, 0);
2914 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
2915 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
2916 DDI_PROP_SUCCESS) {
2917 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
2918 sas_wwn = 0;
2919 }
2920 ddi_prop_free(psas_wwn);
2921 } else {
2922 sas_wwn = 0;
2923 }
2924 }
2925
2926 ASSERT((sas_wwn != 0) || (phymask != 0));
2927 addr.mta_wwn = sas_wwn;
2928 addr.mta_phymask = phymask;
2929 mutex_enter(&mpt->m_mutex);
2930 ptgt = refhash_lookup(mpt->m_targets, &addr);
2931 mutex_exit(&mpt->m_mutex);
2932 if (ptgt == NULL) {
2933 mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
2934 "gone already! phymask:%x, saswwn %"PRIx64, phymask,
2935 sas_wwn);
2936 return (DDI_FAILURE);
2937 }
2938 if (hba_tran->tran_tgt_private == NULL) {
2939 tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
2940 KM_SLEEP);
2941 tgt_private->t_lun = lun;
2942 tgt_private->t_private = ptgt;
2943 hba_tran->tran_tgt_private = tgt_private;
2944 }
2945
2946 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
2947 return (DDI_SUCCESS);
2948 }
2949 mutex_enter(&mpt->m_mutex);
2950
2951 if (ptgt->m_deviceinfo &
2952 (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
2953 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
2954 uchar_t *inq89 = NULL;
2955 int inq89_len = 0x238;
2956 int reallen = 0;
2957 int rval = 0;
2958 struct sata_id *sid = NULL;
2959 char model[SATA_ID_MODEL_LEN + 1];
2960 char fw[SATA_ID_FW_LEN + 1];
2961 char *vid, *pid;
2962 int i;
2963
2964 mutex_exit(&mpt->m_mutex);
2965 /*
2966 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
2967 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
2968 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
2969 */
2970 inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
2971 rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
2972 inq89, inq89_len, &reallen, 1);
2973
2974 if (rval != 0) {
2975 if (inq89 != NULL) {
2976 kmem_free(inq89, inq89_len);
2977 }
2978
2979 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
2980 "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
2981 return (DDI_SUCCESS);
2982 }
2983 sid = (void *)(&inq89[60]);
2984
2985 swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
2986 swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
2987
2988 model[SATA_ID_MODEL_LEN] = 0;
2989 fw[SATA_ID_FW_LEN] = 0;
2990
2991 /*
2992 * split model into into vid/pid
2993 */
2994 for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
2995 if ((*pid == ' ') || (*pid == '\t'))
2996 break;
2997 if (i < SATA_ID_MODEL_LEN) {
2998 vid = model;
2999 /*
3000 * terminate vid, establish pid
3001 */
3002 *pid++ = 0;
3003 } else {
3004 /*
3005 * vid will stay "ATA ", the rule is same
3006 * as sata framework implementation.
3007 */
3008 vid = NULL;
3009 /*
3010 * model is all pid
3011 */
3012 pid = model;
3013 }
3014
3015 /*
3016 * override SCSA "inquiry-*" properties
3017 */
3018 if (vid)
3019 (void) scsi_device_prop_update_inqstring(sd,
3020 INQUIRY_VENDOR_ID, vid, strlen(vid));
3021 if (pid)
3022 (void) scsi_device_prop_update_inqstring(sd,
3023 INQUIRY_PRODUCT_ID, pid, strlen(pid));
3024 (void) scsi_device_prop_update_inqstring(sd,
3025 INQUIRY_REVISION_ID, fw, strlen(fw));
3026
3027 if (inq89 != NULL) {
3028 kmem_free(inq89, inq89_len);
3029 }
3030 } else {
3031 mutex_exit(&mpt->m_mutex);
3032 }
3033
3034 return (DDI_SUCCESS);
3035 }
3036 /*
3037 * tran_tgt_free(9E) - target device instance deallocation
3038 */
3039 static void
3040 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3041 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3042 {
3043 #ifndef __lock_lint
3044 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
3045 #endif
3046
3047 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
3048
3049 if (tgt_private != NULL) {
3050 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
3051 hba_tran->tran_tgt_private = NULL;
3052 }
3053 }
3054
3055 /*
3056 * scsi_pkt handling
3057 *
3058 * Visible to the external world via the transport structure.
3059 */
3060
3061 /*
3062 * Notes:
3063 * - transport the command to the addressed SCSI target/lun device
3064 * - normal operation is to schedule the command to be transported,
3065 * and return TRAN_ACCEPT if this is successful.
3066 * - if NO_INTR, tran_start must poll device for command completion
3067 */
3068 static int
3069 mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
3070 {
3071 #ifndef __lock_lint
3072 _NOTE(ARGUNUSED(ap))
3073 #endif
3074 mptsas_t *mpt = PKT2MPT(pkt);
3075 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3076 int rval;
3077 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3078
3079 NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
3080 ASSERT(ptgt);
3081 if (ptgt == NULL)
3082 return (TRAN_FATAL_ERROR);
3083
3084 /*
3085 * prepare the pkt before taking mutex.
3086 */
3087 rval = mptsas_prepare_pkt(cmd);
3088 if (rval != TRAN_ACCEPT) {
3089 return (rval);
3090 }
3091
3092 /*
3093 * Send the command to target/lun, however your HBA requires it.
3094 * If busy, return TRAN_BUSY; if there's some other formatting error
3095 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
3096 * return of TRAN_ACCEPT.
3097 *
3098 * Remember that access to shared resources, including the mptsas_t
3099 * data structure and the HBA hardware registers, must be protected
3100 * with mutexes, here and everywhere.
3101 *
3102 * Also remember that at interrupt time, you'll get an argument
3103 * to the interrupt handler which is a pointer to your mptsas_t
3104 * structure; you'll have to remember which commands are outstanding
3105 * and which scsi_pkt is the currently-running command so the
3106 * interrupt handler can refer to the pkt to set completion
3107 * status, call the target driver back through pkt_comp, etc.
3108 *
3109 * If the instance lock is held by other thread, don't spin to wait
3110 * for it. Instead, queue the cmd and next time when the instance lock
3111 * is not held, accept all the queued cmd. A extra tx_waitq is
3112 * introduced to protect the queue.
3113 *
3114 * The polled cmd will not be queud and accepted as usual.
3115 *
3116 * Under the tx_waitq mutex, record whether a thread is draining
3117 * the tx_waitq. An IO requesting thread that finds the instance
3118 * mutex contended appends to the tx_waitq and while holding the
3119 * tx_wait mutex, if the draining flag is not set, sets it and then
3120 * proceeds to spin for the instance mutex. This scheme ensures that
3121 * the last cmd in a burst be processed.
3122 *
3123 * we enable this feature only when the helper threads are enabled,
3124 * at which we think the loads are heavy.
3125 *
3126 * per instance mutex m_tx_waitq_mutex is introduced to protect the
3127 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
3128 */
3129
3130 if (mpt->m_doneq_thread_n) {
3131 if (mutex_tryenter(&mpt->m_mutex) != 0) {
3132 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3133 mutex_exit(&mpt->m_mutex);
3134 } else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3135 mutex_enter(&mpt->m_mutex);
3136 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3137 mutex_exit(&mpt->m_mutex);
3138 } else {
3139 mutex_enter(&mpt->m_tx_waitq_mutex);
3140 /*
3141 * ptgt->m_dr_flag is protected by m_mutex or
3142 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
3143 * is acquired.
3144 */
3145 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3146 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3147 /*
3148 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * stall the I/O's which come from
3151 * scsi_vhci since the device/path is
3152 * in unstable state now.
3153 */
3154 mutex_exit(&mpt->m_tx_waitq_mutex);
3155 return (TRAN_BUSY);
3156 } else {
3157 /*
3158 * The device is offline, just fail the
3159 * command by returning
3160 * TRAN_FATAL_ERROR.
3161 */
3162 mutex_exit(&mpt->m_tx_waitq_mutex);
3163 return (TRAN_FATAL_ERROR);
3164 }
3165 }
3166 if (mpt->m_tx_draining) {
3167 cmd->cmd_flags |= CFLAG_TXQ;
3168 *mpt->m_tx_waitqtail = cmd;
3169 mpt->m_tx_waitqtail = &cmd->cmd_linkp;
3170 mutex_exit(&mpt->m_tx_waitq_mutex);
3171 } else { /* drain the queue */
3172 mpt->m_tx_draining = 1;
3173 mutex_exit(&mpt->m_tx_waitq_mutex);
3174 mutex_enter(&mpt->m_mutex);
3175 rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3176 mutex_exit(&mpt->m_mutex);
3177 }
3178 }
3179 } else {
3180 mutex_enter(&mpt->m_mutex);
3181 /*
3182 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3183 * in this case, m_mutex is acquired.
3184 */
3185 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3186 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3187 /*
3188 * commands should be allowed to retry by
3189 * returning TRAN_BUSY to stall the I/O's
3190 * which come from scsi_vhci since the device/
3191 * path is in unstable state now.
3192 */
3193 mutex_exit(&mpt->m_mutex);
3194 return (TRAN_BUSY);
3195 } else {
3196 /*
3197 * The device is offline, just fail the
3198 * command by returning TRAN_FATAL_ERROR.
3199 */
3200 mutex_exit(&mpt->m_mutex);
3201 return (TRAN_FATAL_ERROR);
3202 }
3203 }
3204 rval = mptsas_accept_pkt(mpt, cmd);
3205 mutex_exit(&mpt->m_mutex);
3206 }
3207
3208 return (rval);
3209 }
3210
3211 /*
3212 * Accept all the queued cmds(if any) before accept the current one.
3213 */
3214 static int
3215 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3216 {
3217 int rval;
3218 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3219
3220 ASSERT(mutex_owned(&mpt->m_mutex));
3221 /*
3222 * The call to mptsas_accept_tx_waitq() must always be performed
3223 * because that is where mpt->m_tx_draining is cleared.
3224 */
3225 mutex_enter(&mpt->m_tx_waitq_mutex);
3226 mptsas_accept_tx_waitq(mpt);
3227 mutex_exit(&mpt->m_tx_waitq_mutex);
3228 /*
3229 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3230 * in this case, m_mutex is acquired.
3231 */
3232 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3233 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3234 /*
3235 * The command should be allowed to retry by returning
3236 * TRAN_BUSY to stall the I/O's which come from
3237 * scsi_vhci since the device/path is in unstable state
3238 * now.
3239 */
3240 return (TRAN_BUSY);
3241 } else {
3242 /*
3243 * The device is offline, just fail the command by
3244 * return TRAN_FATAL_ERROR.
3245 */
3246 return (TRAN_FATAL_ERROR);
3247 }
3248 }
3249 rval = mptsas_accept_pkt(mpt, cmd);
3250
3251 return (rval);
3252 }
3253
/*
 * Final acceptance stage for a command: prepare the packet, vet the
 * target/controller state, and either start the command immediately
 * or put it on the wait queue.  Called with m_mutex held.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			/* back out of the transport on prepare failure */
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached.  Check if being reset and if DevHandle is being
	 * re-initialized.  If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/*
			 * A cmd from the tx wait queue has no caller to
			 * hand TRAN_BUSY back to, so complete it through
			 * the done queue instead.
			 */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If the device handle has already been invalidated, just fail the
	 * command.  In theory a command from a scsi_vhci client should never
	 * arrive with an invalid devhdl, since the devhdl is cleared only
	 * after the path goes offline and the target driver is not supposed
	 * to select an offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG3(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/* tx-waitq cmds complete through the done queue */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* no free slot available right now */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3357
3358 int
3359 mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
3360 {
3361 mptsas_slots_t *slots = mpt->m_active;
3362 uint_t slot, start_rotor;
3363 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3364
3365 ASSERT(MUTEX_HELD(&mpt->m_mutex));
3366
3367 /*
3368 * Account for reserved TM request slot and reserved SMID of 0.
3369 */
3370 ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));
3371
3372 /*
3373 * Find the next available slot, beginning at m_rotor. If no slot is
3374 * available, we'll return FALSE to indicate that. This mechanism
3375 * considers only the normal slots, not the reserved slot 0 nor the
3376 * task management slot m_n_normal + 1. The rotor is left to point to
3377 * the normal slot after the one we select, unless we select the last
3378 * normal slot in which case it returns to slot 1.
3379 */
3380 start_rotor = slots->m_rotor;
3381 do {
3382 slot = slots->m_rotor++;
3383 if (slots->m_rotor > slots->m_n_normal)
3384 slots->m_rotor = 1;
3385
3386 if (slots->m_rotor == start_rotor)
3387 break;
3388 } while (slots->m_slot[slot] != NULL);
3389
3390 if (slots->m_slot[slot] != NULL)
3391 return (FALSE);
3392
3393 ASSERT(slot != 0 && slot <= slots->m_n_normal);
3394
3395 cmd->cmd_slot = slot;
3396 slots->m_slot[slot] = cmd;
3397 mpt->m_ncmds++;
3398
3399 /*
3400 * only increment per target ncmds if this is not a
3401 * command that has no target associated with it (i.e. a
3402 * event acknoledgment)
3403 */
3404 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
3405 /*
3406 * Expiration time is set in mptsas_start_cmd
3407 */
3408 ptgt->m_t_ncmds++;
3409 cmd->cmd_active_expiration = 0;
3410 } else {
3411 /*
3412 * Initialize expiration time for passthrough commands,
3413 */
3414 cmd->cmd_active_expiration = gethrtime() +
3415 (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
3416 }
3417 return (TRUE);
3418 }
3419
3420 /*
3421 * prepare the pkt:
3422 * the pkt may have been resubmitted or just reused so
3423 * initialize some fields and do some checks.
3424 */
3425 static int
3426 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3427 {
3428 struct scsi_pkt *pkt = CMD2PKT(cmd);
3429
3430 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3431
3432 /*
3433 * Reinitialize some fields that need it; the packet may
3434 * have been resubmitted
3435 */
3436 pkt->pkt_reason = CMD_CMPLT;
3437 pkt->pkt_state = 0;
3438 pkt->pkt_statistics = 0;
3439 pkt->pkt_resid = 0;
3440 cmd->cmd_age = 0;
3441 cmd->cmd_pkt_flags = pkt->pkt_flags;
3442
3443 /*
3444 * zero status byte.
3445 */
3446 *(pkt->pkt_scbp) = 0;
3447
3448 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3449 pkt->pkt_resid = cmd->cmd_dmacount;
3450
3451 /*
3452 * consistent packets need to be sync'ed first
3453 * (only for data going out)
3454 */
3455 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3456 (cmd->cmd_flags & CFLAG_DMASEND)) {
3457 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3458 DDI_DMA_SYNC_FORDEV);
3459 }
3460 }
3461
3462 cmd->cmd_flags =
3463 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3464 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3465
3466 return (TRAN_ACCEPT);
3467 }
3468
3469 /*
3470 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3471 *
3472 * One of three possibilities:
3473 * - allocate scsi_pkt
3474 * - allocate scsi_pkt and DMA resources
3475 * - allocate DMA resources to an already-allocated pkt
3476 */
static struct scsi_pkt *
mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	mptsas_cmd_t		*cmd, *new_cmd;
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			failure = 1;
	uint_t			oldcookiec;
	mptsas_target_t		*ptgt = NULL;
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			kf;

	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

	/*
	 * The tran's target-private data identifies the target and lun;
	 * refresh the address with the current devhdl.
	 */
	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	if (tgt_private == NULL) {
		return (NULL);
	}
	ptgt = tgt_private->t_private;
	ASSERT(ptgt != NULL);
	if (ptgt == NULL)
		return (NULL);
	ap->a_target = ptgt->m_devhdl;
	ap->a_lun = tgt_private->t_lun;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
#ifdef MPTSAS_TEST_EXTRN_ALLOC
	statuslen *= 100; tgtlen *= 4;
#endif
	NDBG3(("mptsas_scsi_init_pkt:\n"
	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
	    ap->a_target, (void *)pkt, (void *)bp,
	    cmdlen, statuslen, tgtlen, flags));

	/*
	 * Allocate the new packet.
	 */
	if (pkt == NULL) {
		ddi_dma_handle_t	save_dma_handle;
		ddi_dma_handle_t	save_arq_dma_handle;
		struct buf		*save_arq_bp;
		ddi_dma_cookie_t	save_arqcookie;

		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);

		if (cmd) {
			/*
			 * The cache constructor pre-allocated DMA
			 * resources; preserve them across the bzero()
			 * that resets the rest of the command.
			 */
			save_dma_handle = cmd->cmd_dmahandle;
			save_arq_dma_handle = cmd->cmd_arqhandle;
			save_arq_bp = cmd->cmd_arq_buf;
			save_arqcookie = cmd->cmd_arqcookie;
			bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
			cmd->cmd_dmahandle = save_dma_handle;
			cmd->cmd_arqhandle = save_arq_dma_handle;
			cmd->cmd_arq_buf = save_arq_bp;
			cmd->cmd_arqcookie = save_arqcookie;

			/* the scsi_pkt lives immediately after the cmd */
			pkt = (void *)((uchar_t *)cmd +
			    sizeof (struct mptsas_cmd));
			pkt->pkt_ha_private = (opaque_t)cmd;
			pkt->pkt_address = *ap;
			pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
			pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
			pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
			cmd->cmd_pkt = (struct scsi_pkt *)pkt;
			cmd->cmd_cdblen = (uchar_t)cmdlen;
			cmd->cmd_scblen = statuslen;
			cmd->cmd_rqslen = SENSE_LENGTH;
			cmd->cmd_tgt_addr = ptgt;
			failure = 0;
		}

		/*
		 * Areas larger than the inline cdb/private/status space
		 * in mptsas_cmd_t need external allocation.
		 */
		if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			if (failure == 0) {
				/*
				 * if extern alloc fails, all will be
				 * deallocated, including cmd
				 */
				failure = mptsas_pkt_alloc_extern(mpt, cmd,
				    cmdlen, tgtlen, statuslen, kf);
			}
			if (failure) {
				/*
				 * if extern allocation fails, it will
				 * deallocate the new pkt as well
				 */
				return (NULL);
			}
		}
		new_cmd = cmd;

	} else {
		cmd = PKT2CMD(pkt);
		new_cmd = NULL;
	}


	/* grab cmd->cmd_cookiec here as oldcookiec */

	oldcookiec = cmd->cmd_cookiec;

	/*
	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
	 * greater than 0 and we'll need to grab the next dma window
	 */
	/*
	 * SLM-not doing extra command frame right now; may add later
	 */

	if (cmd->cmd_nwin > 0) {

		/*
		 * Make sure we haven't gone past the total number
		 * of windows
		 */
		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
			return (NULL);
		}
		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
			return (NULL);
		}
		goto get_dma_cookies;
	}


	if (flags & PKT_XARQ) {
		cmd->cmd_flags |= CFLAG_XARQ;
	}

	/*
	 * DMA resource allocation.  This version assumes your
	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
	 */
	if (bp && (bp->b_bcount != 0) &&
	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {

		int	cnt, dma_flags;
		mptti_t	*dmap;		/* ptr to the S/G list */

		/*
		 * Set up DMA memory and position to the next DMA segment.
		 */
		ASSERT(cmd->cmd_dmahandle != NULL);

		if (bp->b_flags & B_READ) {
			dma_flags = DDI_DMA_READ;
			cmd->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			dma_flags = DDI_DMA_WRITE;
			cmd->cmd_flags |= CFLAG_DMASEND;
		}
		if (flags & PKT_CONSISTENT) {
			cmd->cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		if (flags & PKT_DMA_PARTIAL) {
			dma_flags |= DDI_DMA_PARTIAL;
		}

		/*
		 * workaround for byte hole issue on psycho and
		 * schizo pre 2.1
		 */
		if ((bp->b_flags & B_READ) && ((bp->b_flags &
		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
		    dma_flags, callback, arg,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
		if (rval == DDI_DMA_PARTIAL_MAP) {
			/* partial mapping: position at the first window */
			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
			    &cmd->cmd_cookiec);
		} else if (rval && (rval != DDI_DMA_MAPPED)) {
			/* map the bind failure onto an errno for the buf */
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->cmd_flags &= ~CFLAG_DMAVALID;
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

get_dma_cookies:
		cmd->cmd_flags |= CFLAG_DMAVALID;
		ASSERT(cmd->cmd_cookiec > 0);

		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
			    cmd->cmd_cookiec);
			bioerror(bp, EINVAL);
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

		/*
		 * Allocate extra SGL buffer if needed.
		 */
		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
		    (cmd->cmd_extra_frames == NULL)) {
			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
			    DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
				    "failed");
				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		/*
		 * Always use scatter-gather transfer
		 * Use the loop below to store physical addresses of
		 * DMA segments, from the DMA cookies, into your HBA's
		 * scatter-gather list.
		 * We need to ensure we have enough kmem alloc'd
		 * for the sg entries since we are no longer using an
		 * array inside mptsas_cmd_t.
		 *
		 * We check cmd->cmd_cookiec against oldcookiec so
		 * the scatter-gather list is correctly allocated
		 */

		if (oldcookiec != cmd->cmd_cookiec) {
			if (cmd->cmd_sg != (mptti_t *)NULL) {
				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
				    oldcookiec);
				cmd->cmd_sg = NULL;
			}
		}

		if (cmd->cmd_sg == (mptti_t *)NULL) {
			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
			    cmd->cmd_cookiec), kf);

			if (cmd->cmd_sg == (mptti_t *)NULL) {
				mptsas_log(mpt, CE_WARN,
				    "unable to kmem_alloc enough memory "
				    "for scatter/gather list");
		/*
		 * if we have an ENOMEM condition we need to behave
		 * the same way as the rest of this routine
		 */

				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		dmap = cmd->cmd_sg;

		ASSERT(cmd->cmd_cookie.dmac_size != 0);

		/*
		 * store the first segment into the S/G list
		 */
		dmap->count = cmd->cmd_cookie.dmac_size;
		dmap->addr.address64.Low = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
		dmap->addr.address64.High = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress >> 32);

		/*
		 * dmacount counts the size of the dma for this window
		 * (if partial dma is being used).  totaldmacount
		 * keeps track of the total amount of dma we have
		 * transferred for all the windows (needed to calculate
		 * the resid value below).
		 */
		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

		/*
		 * We already stored the first DMA scatter gather segment,
		 * start at 1 if we need to store more.
		 */
		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
			/*
			 * Get next DMA cookie
			 */
			ddi_dma_nextcookie(cmd->cmd_dmahandle,
			    &cmd->cmd_cookie);
			dmap++;

			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

			/*
			 * store the segment parms into the S/G list
			 */
			dmap->count = cmd->cmd_cookie.dmac_size;
			dmap->addr.address64.Low = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
			dmap->addr.address64.High = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress >> 32);
		}

		/*
		 * If this was partially allocated we set the resid
		 * the amount of data NOT transferred in this window
		 * If there is only one window, the resid will be 0
		 */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
		NDBG3(("mptsas_scsi_init_pkt: cmd_dmacount=%d.",
		    cmd->cmd_dmacount));
	}
	return (pkt);
}
3820
3821 /*
3822 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3823 *
3824 * Notes:
3825 * - also frees DMA resources if allocated
3826 * - implicit DMA synchonization
3827 */
3828 static void
3829 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3830 {
3831 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3832 mptsas_t *mpt = ADDR2MPT(ap);
3833
3834 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3835 ap->a_target, (void *)pkt));
3836
3837 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3838 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3839 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3840 }
3841
3842 if (cmd->cmd_sg) {
3843 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3844 cmd->cmd_sg = NULL;
3845 }
3846
3847 mptsas_free_extra_sgl_frame(mpt, cmd);
3848
3849 if ((cmd->cmd_flags &
3850 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3851 CFLAG_SCBEXTERN)) == 0) {
3852 cmd->cmd_flags = CFLAG_FREE;
3853 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3854 } else {
3855 mptsas_pkt_destroy_extern(mpt, cmd);
3856 }
3857 }
3858
3859 /*
3860 * kmem cache constructor and destructor:
3861 * When constructing, we bzero the cmd and allocate the dma handle
3862 * When destructing, just free the dma handle
3863 */
static int
mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cmd_t		*cmd = buf;
	mptsas_t		*mpt = cdrarg;
	struct scsi_address	ap;
	uint_t			cookiec;
	ddi_dma_attr_t		arq_dma_attr;
	int			(*callback)(caddr_t);

	callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	NDBG4(("mptsas_kmem_cache_constructor"));

	/*
	 * Placeholder address used only for scsi_alloc_consistent_buf();
	 * the real target/lun are set when the pkt is initialized.
	 */
	ap.a_hba_tran = mpt->m_tran;
	ap.a_target = 0;
	ap.a_lun = 0;

	/*
	 * allocate a dma handle
	 */
	if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
	    NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
		cmd->cmd_dmahandle = NULL;
		return (-1);
	}

	/* DMA-able buffer for auto request sense data */
	cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
	    SENSE_LENGTH, B_READ, callback, NULL);
	if (cmd->cmd_arq_buf == NULL) {
		/* unwind: free the data DMA handle */
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		cmd->cmd_dmahandle = NULL;
		return (-1);
	}

	/*
	 * allocate a arq handle; the ARQ buffer must fit in a single
	 * cookie, hence dma_attr_sgllen = 1
	 */
	arq_dma_attr = mpt->m_msg_dma_attr;
	arq_dma_attr.dma_attr_sgllen = 1;
	if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
	    NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
		/* unwind everything allocated so far */
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_dmahandle = NULL;
		cmd->cmd_arqhandle = NULL;
		return (-1);
	}

	/* bind the ARQ buffer to the ARQ handle */
	if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
	    cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
	    callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
		/* unwind everything allocated so far */
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		ddi_dma_free_handle(&cmd->cmd_arqhandle);
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_dmahandle = NULL;
		cmd->cmd_arqhandle = NULL;
		cmd->cmd_arq_buf = NULL;
		return (-1);
	}

	return (0);
}
3927
3928 static void
3929 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3930 {
3931 #ifndef __lock_lint
3932 _NOTE(ARGUNUSED(cdrarg))
3933 #endif
3934 mptsas_cmd_t *cmd = buf;
3935
3936 NDBG4(("mptsas_kmem_cache_destructor"));
3937
3938 if (cmd->cmd_arqhandle) {
3939 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3940 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3941 cmd->cmd_arqhandle = NULL;
3942 }
3943 if (cmd->cmd_arq_buf) {
3944 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3945 cmd->cmd_arq_buf = NULL;
3946 }
3947 if (cmd->cmd_dmahandle) {
3948 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3949 cmd->cmd_dmahandle = NULL;
3950 }
3951 }
3952
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t	*p = buf;
	mptsas_t		*mpt = cdrarg;
	ddi_dma_attr_t		frame_dma_attr;
	size_t			mem_size, alloc_len;
	ddi_dma_cookie_t	cookie;
	uint_t			ncookie;
	int			(*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/*
	 * Extra SGL frames must be 16-byte aligned and contiguous in DMA
	 * space (a single cookie), so constrain the message DMA
	 * attributes accordingly.
	 */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	/*
	 * Room for all request frames beyond the main one (the main
	 * message frame is allocated elsewhere).
	 */
	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		/* unwind: free the DMA handle */
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		/* unwind: free the memory and the DMA handle */
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address.  This chip uses this
	 * address to dma to and from the driver.  The second
	 * address (m_frames_addr, above) is the address mpt uses
	 * to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_address;

	return (DDI_SUCCESS);
}
4008
4009 static void
4010 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
4011 {
4012 #ifndef __lock_lint
4013 _NOTE(ARGUNUSED(cdrarg))
4014 #endif
4015 mptsas_cache_frames_t *p = buf;
4016 if (p->m_dma_hdl != NULL) {
4017 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
4018 (void) ddi_dma_mem_free(&p->m_acc_hdl);
4019 ddi_dma_free_handle(&p->m_dma_hdl);
4020 p->m_phys_addr = NULL;
4021 p->m_frames_addr = NULL;
4022 p->m_dma_hdl = NULL;
4023 p->m_acc_hdl = NULL;
4024 }
4025
4026 }
4027
4028 /*
4029 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
4030 * for non-standard length cdb, pkt_private, status areas
4031 * if allocation fails, then deallocate all external space and the pkt
4032 */
/* ARGSUSED */
static int
mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
    int cmdlen, int tgtlen, int statuslen, int kf)
{
	caddr_t			cdbp, scbp, tgt;
	int			(*callback)(caddr_t) = (kf == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
	struct scsi_address	ap;
	size_t			senselength;
	ddi_dma_attr_t		ext_arq_dma_attr;
	uint_t			cookiec;

	NDBG3(("mptsas_pkt_alloc_extern: "
	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));

	tgt = cdbp = scbp = NULL;
	cmd->cmd_scblen = statuslen;
	cmd->cmd_privlen = (uchar_t)tgtlen;

	/* CDB too big for the inline cmd_cdb area. */
	if (cmdlen > sizeof (cmd->cmd_cdb)) {
		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
		cmd->cmd_flags |= CFLAG_CDBEXTERN;
	}
	/* Target-private area too big for the inline space. */
	if (tgtlen > PKT_PRIV_LEN) {
		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
		cmd->cmd_pkt->pkt_private = tgt;
	}
	/*
	 * Status area too big for the inline space; this also needs a
	 * DMA-able sense buffer bound to its own single-cookie handle.
	 */
	if (statuslen > EXTCMDS_STATUS_SIZE) {
		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_SCBEXTERN;
		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;

		/* allocate sense data buf for DMA */

		senselength = statuslen - MPTSAS_GET_ITEM_OFF(
		    struct scsi_arq_status, sts_sensedata);
		cmd->cmd_rqslen = (uchar_t)senselength;

		/* placeholder address for scsi_alloc_consistent_buf() */
		ap.a_hba_tran = mpt->m_tran;
		ap.a_target = 0;
		ap.a_lun = 0;

		cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, senselength, B_READ,
		    callback, NULL);

		if (cmd->cmd_ext_arq_buf == NULL) {
			goto fail;
		}
		/*
		 * allocate a extern arq handle and bind the buf
		 */
		ext_arq_dma_attr = mpt->m_msg_dma_attr;
		ext_arq_dma_attr.dma_attr_sgllen = 1;
		if ((ddi_dma_alloc_handle(mpt->m_dip,
		    &ext_arq_dma_attr, callback,
		    NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
		    cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
		    callback, NULL, &cmd->cmd_ext_arqcookie,
		    &cookiec)
		    != DDI_SUCCESS) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
	}
	return (0);
fail:
	/*
	 * The CFLAG_*EXTERN flags set above tell destroy_extern which
	 * areas to free; the cmd itself is freed there as well.
	 */
	mptsas_pkt_destroy_extern(mpt, cmd);
	return (1);
}
4117
4118 /*
4119 * deallocate external pkt space and deallocate the pkt
4120 */
static void
mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));

	/* Freeing an already-freed command is a fatal driver bug. */
	if (cmd->cmd_flags & CFLAG_FREE) {
		mptsas_log(mpt, CE_PANIC,
		    "mptsas_pkt_destroy_extern: freeing free packet");
		_NOTE(NOT_REACHED)
		/* NOTREACHED */
	}
	if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
	}
	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
		/* unbind the sense buffer before freeing handle and buf */
		if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
			(void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
		}
		if (cmd->cmd_ext_arqhandle) {
			ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
			cmd->cmd_ext_arqhandle = NULL;
		}
		if (cmd->cmd_ext_arq_buf)
			scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
	}
	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
	}
	cmd->cmd_flags = CFLAG_FREE;
	kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
}
4153
4154 /*
4155 * tran_sync_pkt(9E) - explicit DMA synchronization
4156 */
4157 /*ARGSUSED*/
4158 static void
4159 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4160 {
4161 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4162
4163 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4164 ap->a_target, (void *)pkt));
4165
4166 if (cmd->cmd_dmahandle) {
4167 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4168 (cmd->cmd_flags & CFLAG_DMASEND) ?
4169 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4170 }
4171 }
4172
4173 /*
4174 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4175 */
4176 /*ARGSUSED*/
4177 static void
4178 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4179 {
4180 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4181 mptsas_t *mpt = ADDR2MPT(ap);
4182
4183 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4184 ap->a_target, (void *)pkt));
4185
4186 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4187 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4188 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4189 }
4190
4191 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4192 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4193 cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4194 }
4195
4196 mptsas_free_extra_sgl_frame(mpt, cmd);
4197 }
4198
4199 static void
4200 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4201 {
4202 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4203 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4204 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4205 DDI_DMA_SYNC_FORCPU);
4206 }
4207 (*pkt->pkt_comp)(pkt);
4208 }
4209
/*
 * Fill the SGL space of the main request frame with MPI2 simple
 * 64-bit SGE elements, one per DMA cookie.  cookiec is the number of
 * elements to write; end_flags is OR-ed into the final element's
 * flags to terminate the list.
 */
static void
mptsas_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
    ddi_acc_handle_t acc_hdl, uint_t cookiec, uint32_t end_flags)
{
	pMpi2SGESimple64_t	sge;
	mptti_t			*dmap;
	uint32_t		flags;

	dmap = cmd->cmd_sg;

	sge = (pMpi2SGESimple64_t)(&frame->SGL);
	while (cookiec--) {
		/* 64-bit cookie address, low half then high half. */
		ddi_put32(acc_hdl,
		    &sge->Address.Low, dmap->addr.address64.Low);
		ddi_put32(acc_hdl,
		    &sge->Address.High, dmap->addr.address64.High);
		/*
		 * FlagsLength packs the byte count and the flag bits in
		 * one 32-bit field: store the length, read it back, and
		 * OR in the flags before the final store below.
		 */
		ddi_put32(acc_hdl, &sge->FlagsLength,
		    dmap->count);
		flags = ddi_get32(acc_hdl, &sge->FlagsLength);
		flags |= ((uint32_t)
		    (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);

		/*
		 * If this is the last cookie, we set the flags
		 * to indicate so
		 */
		if (cookiec == 0) {
			flags |= end_flags;
		}
		/* Direction: host-to-IOC on writes, IOC-to-host on reads. */
		if (cmd->cmd_flags & CFLAG_DMASEND) {
			flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
			    MPI2_SGE_FLAGS_SHIFT);
		} else {
			flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
			    MPI2_SGE_FLAGS_SHIFT);
		}
		ddi_put32(acc_hdl, &sge->FlagsLength, flags);
		dmap++;
		sge++;
	}
}
4254
/*
 * Build an MPI2 SGL that does not fit entirely in the main request
 * frame: fill the main frame's SGL area, append a chain element that
 * points at the command's extra-frame buffer, then fill the remaining
 * simple elements there, chaining frame to frame as needed.
 */
static void
mptsas_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2SGESimple64_t	sge;
	pMpi2SGEChain64_t	sgechain;
	uint_t			cookiec;
	mptti_t			*dmap;
	uint32_t		flags;

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    3 double-words (12 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Some restrictions:
	 * 1. For 64-bit DMA, the simple element and chain element
	 *    are both of 3 double-words (12 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 * 2. On some controllers (like the 1064/1068), a frame can
	 *    hold SGL elements with the last 1 or 2 double-words
	 *    (4 or 8 bytes) un-used. On these controllers, we should
	 *    recognize that there's not enough room for another SGL
	 *    element and move the sge pointer to the next frame.
	 */
	int		i, j, k, l, frames, sgemax;
	int		temp;
	uint8_t		chainflags;
	uint16_t	chainlength;
	mptsas_cache_frames_t *p;

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reserved for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
	    - 1);
	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
	    sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	dmap = cmd->cmd_sg;
	sge = (pMpi2SGESimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
	mptsas_sge_mainframe(cmd, frame, acc_hdl, j,
	    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT));
	dmap += j;
	sge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of double-words (4
	 *    bytes) so the last value should be divided by 4.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
	sgechain = (pMpi2SGEChain64_t)sge;
	chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
	ddi_put8(acc_hdl, &sgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		ASSERT(mpt->m_req_frame_size >= sizeof (MPI2_SGE_SIMPLE64));
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_SGE_SIMPLE64) *
		    sizeof (MPI2_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_SGE_SIMPLE64));
	}

	/*
	 * NOTE(review): p is dereferenced without a NULL check; this
	 * assumes the extra-frame buffer was already allocated for the
	 * command before this path is taken -- confirm in the caller.
	 */
	p = cmd->cmd_extra_frames;

	ddi_put16(acc_hdl, &sgechain->Length, chainlength);
	ddi_put32(acc_hdl, &sgechain->Address.Low,
	    p->m_phys_addr);
	/* SGL is allocated in the first 4G mem range */
	ddi_put32(acc_hdl, &sgechain->Address.High, 0);

	/*
	 * If there are 2 or more frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame.  Note that the value NextChainOffset should be
	 * in double-words (4 bytes).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &sgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
	} else {
		ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	sge = (pMpi2SGESimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				sgechain = (pMpi2SGEChain64_t)sge;
				/* Re-process this cookie in the next frame. */
				j--;
				chainflags = (
				    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
				ddi_put8(p->m_acc_hdl,
				    &sgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				ddi_put32(p->m_acc_hdl,
				    &sgechain->Address.Low,
				    (p->m_phys_addr +
				    (mpt->m_req_frame_size * k)));
				ddi_put32(p->m_acc_hdl,
				    &sgechain->Address.High, 0);

				/*
				 * If there are 2 or more frames left
				 * we have to set the next chain offset
				 * to the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &sgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_SGE_SIMPLE64))
					    >> 2);
					ddi_put16(p->m_acc_hdl,
					    &sgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_SGE_SIMPLE64) *
					    sizeof (MPI2_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &sgechain->NextChainOffset,
					    0);
					ddi_put16(p->m_acc_hdl,
					    &sgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				sge = (pMpi2SGESimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			/* 64-bit cookie address, low half then high half. */
			ddi_put32(p->m_acc_hdl,
			    &sge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &sge->Address.High,
			    dmap->addr.address64.High);
			/*
			 * FlagsLength packs the byte count and the flag
			 * bits; store the length, read it back, then OR
			 * in the flags before the final store.
			 */
			ddi_put32(p->m_acc_hdl,
			    &sge->FlagsLength, dmap->count);
			flags = ddi_get32(p->m_acc_hdl,
			    &sge->FlagsLength);
			flags |= ((uint32_t)(
			    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * we set the last simple element as last
			 * element
			 */
			if ((l == sgemax) && (k != frames)) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}

			/*
			 * If this is the final cookie we
			 * indicate it by setting the flags
			 */
			if (j == i) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT |
				    MPI2_SGE_FLAGS_END_OF_BUFFER |
				    MPI2_SGE_FLAGS_END_OF_LIST) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			/*
			 * Direction: host-to-IOC on writes,
			 * IOC-to-host on reads.
			 */
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |=
				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |=
				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(p->m_acc_hdl,
			    &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
4554
/*
 * Fill the SGL space of the main request frame with MPI2.5 IEEE
 * simple 64-bit SGE elements, one per DMA cookie.  end_flag is OR-ed
 * into the flags of the final element (e.g. END_OF_LIST).
 */
static void
mptsas_ieee_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
    ddi_acc_handle_t acc_hdl, uint_t cookiec, uint8_t end_flag)
{
	pMpi2IeeeSgeSimple64_t	ieeesge;
	mptti_t			*dmap;
	uint8_t			flags;

	dmap = cmd->cmd_sg;

	NDBG1(("mptsas_ieee_sge_mainframe: cookiec=%d, %s", cookiec,
	    cmd->cmd_flags & CFLAG_DMASEND?"Out":"In"));

	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
	while (cookiec--) {
		/* 64-bit cookie address, low half then high half. */
		ddi_put32(acc_hdl,
		    &ieeesge->Address.Low, dmap->addr.address64.Low);
		ddi_put32(acc_hdl,
		    &ieeesge->Address.High, dmap->addr.address64.High);
		ddi_put32(acc_hdl, &ieeesge->Length,
		    dmap->count);
		NDBG1(("mptsas_ieee_sge_mainframe: len=%d", dmap->count));
		/*
		 * Unlike MPI2 SGEs, the flags written here are the
		 * same for both transfer directions.
		 */
		flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);

		/*
		 * If this is the last cookie, we set the flags
		 * to indicate so
		 */
		if (cookiec == 0) {
			flags |= end_flag;
		}

		ddi_put8(acc_hdl, &ieeesge->Flags, flags);
		dmap++;
		ieeesge++;
	}
}
4593
/*
 * Build an MPI2.5 IEEE SGL that does not fit entirely in the main
 * request frame: fill the main frame's SGL area, append an IEEE chain
 * element that points at the command's extra-frame buffer, then fill
 * the remaining simple elements there, chaining frame to frame.
 */
static void
mptsas_ieee_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2IeeeSgeSimple64_t	ieeesge;
	pMpi25IeeeSgeChain64_t	ieeesgechain;
	uint_t			cookiec;
	mptti_t			*dmap;
	uint8_t			flags;

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_ieee_sge_chain: cookiec=%d", cookiec));

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    4 double-words (16 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Restrictions:
	 *    For 64-bit DMA, the simple element and chain element
	 *    are both of 4 double-words (16 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 */
	int		i, j, k, l, frames, sgemax;
	int		temp;
	uint8_t		chainflags;
	uint32_t	chainlength;
	mptsas_cache_frames_t *p;

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reserved for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_IEEE_SGE_SIMPLE64))
	    - 1);
	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
	    sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	NDBG1(("mptsas_ieee_sge_chain: temp=%d, frames=%d", temp, frames));
	dmap = cmd->cmd_sg;
	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
	mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl, j, 0);
	dmap += j;
	ieeesge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of quad-words (16
	 *    bytes) so the last value should be divided by 16.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
	chainflags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
	ddi_put8(acc_hdl, &ieeesgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		ASSERT(mpt->m_req_frame_size >=
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
	}

	/*
	 * NOTE(review): p is dereferenced without a NULL check; this
	 * assumes the extra-frame buffer was already allocated for the
	 * command before this path is taken -- confirm in the caller.
	 */
	p = cmd->cmd_extra_frames;

	ddi_put32(acc_hdl, &ieeesgechain->Length, chainlength);
	ddi_put32(acc_hdl, &ieeesgechain->Address.Low,
	    p->m_phys_addr);
	/* SGL is allocated in the first 4G mem range */
	ddi_put32(acc_hdl, &ieeesgechain->Address.High, 0);

	/*
	 * If there are 2 or more frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame.  Note that for IEEE SGEs the NextChainOffset
	 * value is in units of 16 bytes (hence the shift by 4).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	} else {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	ieeesge = (pMpi2IeeeSgeSimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
				/* Re-process this cookie in the next frame. */
				j--;
				chainflags =
				    MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
				ddi_put8(p->m_acc_hdl,
				    &ieeesgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.Low,
				    (p->m_phys_addr +
				    (mpt->m_req_frame_size * k)));
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.High, 0);

				/*
				 * If there are 2 or more frames left
				 * we have to set the next chain offset
				 * to the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64))
					    >> 4);
					ASSERT(mpt->m_req_frame_size >=
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    0);
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				ieeesge = (pMpi2IeeeSgeSimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			/* 64-bit cookie address, low half then high half. */
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Length, dmap->count);
			flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * do we need to do anything?
			 * if ((l == sgemax) && (k != frames)) {
			 * }
			 */

			/*
			 * If this is the final cookie set end of list.
			 */
			if (j == i) {
				flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
			}

			ddi_put8(p->m_acc_hdl, &ieeesge->Flags, flags);
			dmap++;
			ieeesge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
4867
4868 static void
4869 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4870 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4871 {
4872 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4873
4874 NDBG1(("mptsas_sge_setup: cookiec=%d", cmd->cmd_cookiec));
4875
4876 /*
4877 * Set read/write bit in control.
4878 */
4879 if (cmd->cmd_flags & CFLAG_DMASEND) {
4880 *control |= MPI2_SCSIIO_CONTROL_WRITE;
4881 } else {
4882 *control |= MPI2_SCSIIO_CONTROL_READ;
4883 }
4884
4885 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4886
4887 /*
4888 * We have 4 cases here. First where we can fit all the
4889 * SG elements into the main frame, and the case
4890 * where we can't. The SG element is also different when using
4891 * MPI2.5 interface.
4892 * If we have more cookies than we can attach to a frame
4893 * we will need to use a chain element to point
4894 * a location of memory where the rest of the S/G
4895 * elements reside.
4896 */
4897 if (cmd->cmd_cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4898 if (mpt->m_MPI25) {
4899 mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl,
4900 cmd->cmd_cookiec,
4901 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
4902 } else {
4903 mptsas_sge_mainframe(cmd, frame, acc_hdl,
4904 cmd->cmd_cookiec,
4905 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4906 | MPI2_SGE_FLAGS_END_OF_BUFFER
4907 | MPI2_SGE_FLAGS_END_OF_LIST) <<
4908 MPI2_SGE_FLAGS_SHIFT));
4909 }
4910 } else {
4911 if (mpt->m_MPI25) {
4912 mptsas_ieee_sge_chain(mpt, cmd, frame, acc_hdl);
4913 } else {
4914 mptsas_sge_chain(mpt, cmd, frame, acc_hdl);
4915 }
4916 }
4917 }
4918
4919 /*
4920 * Interrupt handling
4921 * Utility routine. Poll for status of a command sent to HBA
4922 * without interrupts (a FLAG_NOINTR command).
4923 */
4924 int
4925 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4926 {
4927 int rval = TRUE;
4928
4929 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4930
4931 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4932 mptsas_restart_hba(mpt);
4933 }
4934
4935 /*
4936 * Wait, using drv_usecwait(), long enough for the command to
4937 * reasonably return from the target if the target isn't
4938 * "dead". A polled command may well be sent from scsi_poll, and
4939 * there are retries built in to scsi_poll if the transport
4940 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
4941 * and retries the transport up to scsi_poll_busycnt times
4942 * (currently 60) if
4943 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4944 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4945 *
4946 * limit the waiting to avoid a hang in the event that the
4947 * cmd never gets started but we are still receiving interrupts
4948 */
4949 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4950 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4951 NDBG5(("mptsas_poll: command incomplete"));
4952 rval = FALSE;
4953 break;
4954 }
4955 }
4956
4957 if (rval == FALSE) {
4958
4959 /*
4960 * this isn't supposed to happen, the hba must be wedged
4961 * Mark this cmd as a timeout.
4962 */
4963 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4964 (STAT_TIMEOUT|STAT_ABORTED));
4965
4966 if (poll_cmd->cmd_queued == FALSE) {
4967
4968 NDBG5(("mptsas_poll: not on waitq"));
4969
4970 poll_cmd->cmd_pkt->pkt_state |=
4971 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4972 } else {
4973
4974 /* find and remove it from the waitq */
4975 NDBG5(("mptsas_poll: delete from waitq"));
4976 mptsas_waitq_delete(mpt, poll_cmd);
4977 }
4978
4979 }
4980 mptsas_fma_check(mpt, poll_cmd);
4981 NDBG5(("mptsas_poll: done"));
4982 return (rval);
4983 }
4984
4985 /*
4986 * Used for polling cmds and TM function
4987 */
4988 static int
4989 mptsas_wait_intr(mptsas_t *mpt, int polltime)
4990 {
4991 int cnt;
4992 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
4993 uint32_t int_mask;
4994
4995 NDBG5(("mptsas_wait_intr"));
4996
4997 mpt->m_polled_intr = 1;
4998
4999 /*
5000 * Get the current interrupt mask and disable interrupts. When
5001 * re-enabling ints, set mask to saved value.
5002 */
5003 int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
5004 MPTSAS_DISABLE_INTR(mpt);
5005
5006 /*
5007 * Keep polling for at least (polltime * 1000) seconds
5008 */
5009 for (cnt = 0; cnt < polltime; cnt++) {
5010 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5011 DDI_DMA_SYNC_FORCPU);
5012
5013 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5014 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5015
5016 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5017 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5018 ddi_get32(mpt->m_acc_post_queue_hdl,
5019 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5020 drv_usecwait(1000);
5021 continue;
5022 }
5023
5024 /*
5025 * The reply is valid, process it according to its
5026 * type.
5027 */
5028 mptsas_process_intr(mpt, reply_desc_union);
5029
5030 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5031 mpt->m_post_index = 0;
5032 }
5033
5034 /*
5035 * Update the global reply index
5036 */
5037 ddi_put32(mpt->m_datap,
5038 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5039 mpt->m_polled_intr = 0;
5040
5041 /*
5042 * Re-enable interrupts and quit.
5043 */
5044 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
5045 int_mask);
5046 return (TRUE);
5047
5048 }
5049
5050 /*
5051 * Clear polling flag, re-enable interrupts and quit.
5052 */
5053 mpt->m_polled_intr = 0;
5054 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
5055 return (FALSE);
5056 }
5057
/*
 * Handle a SCSI IO success reply descriptor: validate the SMID, look
 * up the command in the active slots, mark the packet's state bits,
 * and either wake a passthru waiter or queue the command on the done
 * queue for completion.
 */
static void
mptsas_handle_scsi_io_success(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2SCSIIOSuccessReplyDescriptor_t	scsi_io_success;
	uint16_t				SMID;
	mptsas_slots_t				*slots = mpt->m_active;
	mptsas_cmd_t				*cmd = NULL;
	struct scsi_pkt				*pkt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);

	/*
	 * This is a success reply so just complete the IO.  First, do a sanity
	 * check on the SMID.  The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_normal)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return;
	}

	/* Success reply: full status, and data moved if DMA was mapped. */
	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/* Passthru commands complete via the cv, not the done queue. */
	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_broadcast(&mpt->m_passthru_cv);
		return;
	} else {
		mptsas_remove_cmd(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5127
5128 static void
5129 mptsas_handle_address_reply(mptsas_t *mpt,
5130 pMpi2ReplyDescriptorsUnion_t reply_desc)
5131 {
5132 pMpi2AddressReplyDescriptor_t address_reply;
5133 pMPI2DefaultReply_t reply;
5134 mptsas_fw_diagnostic_buffer_t *pBuffer;
5135 uint32_t reply_addr;
5136 uint16_t SMID, iocstatus;
5137 mptsas_slots_t *slots = mpt->m_active;
5138 mptsas_cmd_t *cmd = NULL;
5139 uint8_t function, buffer_type;
5140 m_replyh_arg_t *args;
5141 int reply_frame_no;
5142
5143 ASSERT(mutex_owned(&mpt->m_mutex));
5144
5145 address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
5146 reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
5147 &address_reply->ReplyFrameAddress);
5148 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
5149
5150 /*
5151 * If reply frame is not in the proper range we should ignore this
5152 * message and exit the interrupt handler.
5153 */
5154 if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
5155 (reply_addr >= (mpt->m_reply_frame_dma_addr +
5156 (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
5157 ((reply_addr - mpt->m_reply_frame_dma_addr) %
5158 mpt->m_reply_frame_size != 0)) {
5159 mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
5160 "address 0x%x\n", reply_addr);
5161 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5162 return;
5163 }
5164
5165 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
5166 DDI_DMA_SYNC_FORCPU);
5167 reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
5168 mpt->m_reply_frame_dma_addr));
5169 function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
5170
5171 NDBG31(("mptsas_handle_address_reply: function 0x%x, reply_addr=0x%x",
5172 function, reply_addr));
5173
5174 /*
5175 * don't get slot information and command for events since these values
5176 * don't exist
5177 */
5178 if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
5179 (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
5180 /*
5181 * This could be a TM reply, which use the last allocated SMID,
5182 * so allow for that.
5183 */
5184 if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
5185 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
5186 "%d\n", SMID);
5187 ddi_fm_service_impact(mpt->m_dip,
5188 DDI_SERVICE_UNAFFECTED);
5189 return;
5190 }
5191
5192 cmd = slots->m_slot[SMID];
5193
5194 /*
5195 * print warning and return if the slot is empty
5196 */
5197 if (cmd == NULL) {
5198 mptsas_log(mpt, CE_WARN, "?NULL command for address "
5199 "reply in slot %d", SMID);
5200 return;
5201 }
5202 if ((cmd->cmd_flags &
5203 (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
5204 cmd->cmd_rfm = reply_addr;
5205 cmd->cmd_flags |= CFLAG_FINISHED;
5206 cv_broadcast(&mpt->m_passthru_cv);
5207 cv_broadcast(&mpt->m_config_cv);
5208 cv_broadcast(&mpt->m_fw_diag_cv);
5209 return;
5210 } else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
5211 mptsas_remove_cmd(mpt, cmd);
5212 }
5213 NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
5214 }
5215 /*
5216 * Depending on the function, we need to handle
5217 * the reply frame (and cmd) differently.
5218 */
5219 switch (function) {
5220 case MPI2_FUNCTION_SCSI_IO_REQUEST:
5221 mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
5222 break;
5223 case MPI2_FUNCTION_SCSI_TASK_MGMT:
5224 cmd->cmd_rfm = reply_addr;
5225 mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
5226 cmd);
5227 break;
5228 case MPI2_FUNCTION_FW_DOWNLOAD:
5229 cmd->cmd_flags |= CFLAG_FINISHED;
5230 cv_signal(&mpt->m_fw_cv);
5231 break;
5232 case MPI2_FUNCTION_EVENT_NOTIFICATION:
5233 reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
5234 mpt->m_reply_frame_size;
5235 args = &mpt->m_replyh_args[reply_frame_no];
5236 args->mpt = (void *)mpt;
5237 args->rfm = reply_addr;
5238
5239 /*
5240 * Record the event if its type is enabled in
5241 * this mpt instance by ioctl.
5242 */
5243 mptsas_record_event(args);
5244
5245 /*
5246 * Handle time critical events
5247 * NOT_RESPONDING/ADDED only now
5248 */
5249 if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
5250 /*
5251 * Would not return main process,
5252 * just let taskq resolve ack action
5253 * and ack would be sent in taskq thread
5254 */
5255 NDBG20(("send mptsas_handle_event_sync success"));
5256 }
5257
5258 if (mpt->m_in_reset) {
5259 NDBG20(("dropping event received during reset"));
5260 return;
5261 }
5262
5263 if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
5264 (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
5265 mptsas_log(mpt, CE_WARN, "No memory available"
5266 "for dispatch taskq");
5267 /*
5268 * Return the reply frame to the free queue.
5269 */
5270 ddi_put32(mpt->m_acc_free_queue_hdl,
5271 &((uint32_t *)(void *)
5272 mpt->m_free_queue)[mpt->m_free_index], reply_addr);
5273 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5274 DDI_DMA_SYNC_FORDEV);
5275 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5276 mpt->m_free_index = 0;
5277 }
5278
5279 ddi_put32(mpt->m_datap,
5280 &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
5281 }
5282 return;
5283 case MPI2_FUNCTION_DIAG_BUFFER_POST:
5284 /*
5285 * If SMID is 0, this implies that the reply is due to a
5286 * release function with a status that the buffer has been
5287 * released. Set the buffer flags accordingly.
5288 */
5289 if (SMID == 0) {
5290 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
5291 &reply->IOCStatus);
5292 buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
5293 &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
5294 if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
5295 pBuffer =
5296 &mpt->m_fw_diag_buffer_list[buffer_type];
5297 pBuffer->valid_data = TRUE;
5298 pBuffer->owned_by_firmware = FALSE;
5299 pBuffer->immediate = FALSE;
5300 }
5301 } else {
5302 /*
5303 * Normal handling of diag post reply with SMID.
5304 */
5305 cmd = slots->m_slot[SMID];
5306
5307 /*
5308 * print warning and return if the slot is empty
5309 */
5310 if (cmd == NULL) {
5311 mptsas_log(mpt, CE_WARN, "?NULL command for "
5312 "address reply in slot %d", SMID);
5313 return;
5314 }
5315 cmd->cmd_rfm = reply_addr;
5316 cmd->cmd_flags |= CFLAG_FINISHED;
5317 cv_broadcast(&mpt->m_fw_diag_cv);
5318 }
5319 return;
5320 default:
5321 mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
5322 break;
5323 }
5324
5325 /*
5326 * Return the reply frame to the free queue.
5327 */
5328 ddi_put32(mpt->m_acc_free_queue_hdl,
5329 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
5330 reply_addr);
5331 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5332 DDI_DMA_SYNC_FORDEV);
5333 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5334 mpt->m_free_index = 0;
5335 }
5336 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
5337 mpt->m_free_index);
5338
5339 if (cmd->cmd_flags & CFLAG_FW_CMD)
5340 return;
5341
5342 if (cmd->cmd_flags & CFLAG_RETRY) {
5343 /*
5344 * The target returned QFULL or busy, do not add this
5345 * pkt to the doneq since the hba will retry
5346 * this cmd.
5347 *
5348 * The pkt has already been resubmitted in
5349 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
5350 * Remove this cmd_flag here.
5351 */
5352 cmd->cmd_flags &= ~CFLAG_RETRY;
5353 } else {
5354 mptsas_doneq_add(mpt, cmd);
5355 }
5356 }
5357
5358 static void
5359 mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
5360 mptsas_cmd_t *cmd)
5361 {
5362 uint8_t scsi_status, scsi_state;
5363 uint16_t ioc_status;
5364 uint32_t xferred, sensecount, responsedata, loginfo = 0;
5365 struct scsi_pkt *pkt;
5366 struct scsi_arq_status *arqstat;
5367 struct buf *bp;
5368 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5369 uint8_t *sensedata = NULL;
5370 uint64_t sas_wwn;
5371 uint8_t phy;
5372 char wwn_str[MPTSAS_WWN_STRLEN];
5373
5374 if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
5375 (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
5376 bp = cmd->cmd_ext_arq_buf;
5377 } else {
5378 bp = cmd->cmd_arq_buf;
5379 }
5380
5381 scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
5382 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5383 scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
5384 xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
5385 sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
5386 responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
5387 &reply->ResponseInfo);
5388
5389 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
5390 sas_wwn = ptgt->m_addr.mta_wwn;
5391 phy = ptgt->m_phynum;
5392 if (sas_wwn == 0) {
5393 (void) sprintf(wwn_str, "p%x", phy);
5394 } else {
5395 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
5396 }
5397 loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
5398 &reply->IOCLogInfo);
5399 mptsas_log(mpt, CE_NOTE,
5400 "?Log info 0x%x received for target %d %s.\n"
5401 "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5402 loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
5403 scsi_state);
5404 }
5405
5406 NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5407 scsi_status, ioc_status, scsi_state));
5408
5409 pkt = CMD2PKT(cmd);
5410 *(pkt->pkt_scbp) = scsi_status;
5411
5412 if (loginfo == 0x31170000) {
5413 /*
5414 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5415 * 0x31170000 comes, that means the device missing delay
5416 * is in progressing, the command need retry later.
5417 */
5418 *(pkt->pkt_scbp) = STATUS_BUSY;
5419 return;
5420 }
5421
5422 if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
5423 ((ioc_status & MPI2_IOCSTATUS_MASK) ==
5424 MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
5425 pkt->pkt_reason = CMD_INCOMPLETE;
5426 pkt->pkt_state |= STATE_GOT_BUS;
5427 if (ptgt->m_reset_delay == 0) {
5428 mptsas_set_throttle(mpt, ptgt,
5429 DRAIN_THROTTLE);
5430 }
5431 return;
5432 }
5433
5434 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5435 responsedata &= 0x000000FF;
5436 if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5437 mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
5438 pkt->pkt_reason = CMD_TLR_OFF;
5439 return;
5440 }
5441 }
5442
5443
5444 switch (scsi_status) {
5445 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5446 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5447 arqstat = (void*)(pkt->pkt_scbp);
5448 arqstat->sts_rqpkt_status = *((struct scsi_status *)
5449 (pkt->pkt_scbp));
5450 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
5451 STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
5452 if (cmd->cmd_flags & CFLAG_XARQ) {
5453 pkt->pkt_state |= STATE_XARQ_DONE;
5454 }
5455 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5456 pkt->pkt_state |= STATE_XFERRED_DATA;
5457 }
5458 arqstat->sts_rqpkt_reason = pkt->pkt_reason;
5459 arqstat->sts_rqpkt_state = pkt->pkt_state;
5460 arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
5461 arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
5462 sensedata = (uint8_t *)&arqstat->sts_sensedata;
5463
5464 bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
5465 ((cmd->cmd_rqslen >= sensecount) ? sensecount :
5466 cmd->cmd_rqslen));
5467 arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
5468 cmd->cmd_flags |= CFLAG_CMDARQ;
5469 /*
5470 * Set proper status for pkt if autosense was valid
5471 */
5472 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5473 struct scsi_status zero_status = { 0 };
5474 arqstat->sts_rqpkt_status = zero_status;
5475 }
5476
5477 /*
5478 * ASC=0x47 is parity error
5479 * ASC=0x48 is initiator detected error received
5480 */
5481 if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
5482 ((scsi_sense_asc(sensedata) == 0x47) ||
5483 (scsi_sense_asc(sensedata) == 0x48))) {
5484 mptsas_log(mpt, CE_NOTE, "Aborted_command!");
5485 }
5486
5487 /*
5488 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5489 * ASC/ASCQ=0x25/0x00 means invalid lun
5490 */
5491 if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
5492 (scsi_sense_asc(sensedata) == 0x3F) &&
5493 (scsi_sense_ascq(sensedata) == 0x0E)) ||
5494 ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
5495 (scsi_sense_asc(sensedata) == 0x25) &&
5496 (scsi_sense_ascq(sensedata) == 0x00))) {
5497 mptsas_topo_change_list_t *topo_node = NULL;
5498
5499 topo_node = kmem_zalloc(
5500 sizeof (mptsas_topo_change_list_t),
5501 KM_NOSLEEP);
5502 if (topo_node == NULL) {
5503 mptsas_log(mpt, CE_NOTE, "No memory"
5504 "resource for handle SAS dynamic"
5505 "reconfigure.\n");
5506 break;
5507 }
5508 topo_node->mpt = mpt;
5509 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
5510 topo_node->un.phymask = ptgt->m_addr.mta_phymask;
5511 topo_node->devhdl = ptgt->m_devhdl;
5512 topo_node->object = (void *)ptgt;
5513 topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5514
5515 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5516 mptsas_handle_dr,
5517 (void *)topo_node,
5518 DDI_NOSLEEP)) != DDI_SUCCESS) {
5519 kmem_free(topo_node,
5520 sizeof (mptsas_topo_change_list_t));
5521 mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
5522 "for handle SAS dynamic reconfigure"
5523 "failed. \n");
5524 }
5525 }
5526 break;
5527 case MPI2_SCSI_STATUS_GOOD:
5528 switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5529 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5530 pkt->pkt_reason = CMD_DEV_GONE;
5531 pkt->pkt_state |= STATE_GOT_BUS;
5532 if (ptgt->m_reset_delay == 0) {
5533 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5534 }
5535 NDBG31(("lost disk for target%d, command:%x",
5536 Tgt(cmd), pkt->pkt_cdbp[0]));
5537 break;
5538 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5539 NDBG31(("data overrun: xferred=%d", xferred));
5540 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5541 pkt->pkt_reason = CMD_DATA_OVR;
5542 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5543 | STATE_SENT_CMD | STATE_GOT_STATUS
5544 | STATE_XFERRED_DATA);
5545 pkt->pkt_resid = 0;
5546 break;
5547 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5548 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5549 NDBG31(("data underrun: xferred=%d", xferred));
5550 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5551 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5552 | STATE_SENT_CMD | STATE_GOT_STATUS);
5553 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5554 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5555 pkt->pkt_state |= STATE_XFERRED_DATA;
5556 }
5557 break;
5558 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5559 if (cmd->cmd_active_expiration <= gethrtime()) {
5560 /*
5561 * When timeout requested, propagate
5562 * proper reason and statistics to
5563 * target drivers.
5564 */
5565 mptsas_set_pkt_reason(mpt, cmd, CMD_TIMEOUT,
5566 STAT_BUS_RESET | STAT_TIMEOUT);
5567 } else {
5568 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
5569 STAT_BUS_RESET);
5570 }
5571 break;
5572 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5573 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5574 mptsas_set_pkt_reason(mpt,
5575 cmd, CMD_RESET, STAT_DEV_RESET);
5576 break;
5577 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5578 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5579 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5580 mptsas_set_pkt_reason(mpt,
5581 cmd, CMD_TERMINATED, STAT_TERMINATED);
5582 break;
5583 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5584 case MPI2_IOCSTATUS_BUSY:
5585 /*
5586 * set throttles to drain
5587 */
5588 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
5589 ptgt = refhash_next(mpt->m_targets, ptgt)) {
5590 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5591 }
5592
5593 /*
5594 * retry command
5595 */
5596 cmd->cmd_flags |= CFLAG_RETRY;
5597 cmd->cmd_pkt_flags |= FLAG_HEAD;
5598
5599 (void) mptsas_accept_pkt(mpt, cmd);
5600 break;
5601 default:
5602 mptsas_log(mpt, CE_WARN,
5603 "unknown ioc_status = %x\n", ioc_status);
5604 mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5605 "count = %x, scsi_status = %x", scsi_state,
5606 xferred, scsi_status);
5607 break;
5608 }
5609 break;
5610 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5611 mptsas_handle_qfull(mpt, cmd);
5612 break;
5613 case MPI2_SCSI_STATUS_BUSY:
5614 NDBG31(("scsi_status busy received"));
5615 break;
5616 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5617 NDBG31(("scsi_status reservation conflict received"));
5618 break;
5619 default:
5620 mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
5621 scsi_status, ioc_status);
5622 mptsas_log(mpt, CE_WARN,
5623 "mptsas_process_intr: invalid scsi status\n");
5624 break;
5625 }
5626 }
5627
5628 static void
5629 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5630 mptsas_cmd_t *cmd)
5631 {
5632 uint8_t task_type;
5633 uint16_t ioc_status;
5634 uint32_t log_info;
5635 uint16_t dev_handle;
5636 struct scsi_pkt *pkt = CMD2PKT(cmd);
5637
5638 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5639 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5640 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5641 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5642
5643 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5644 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5645 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5646 task_type, ioc_status, log_info, dev_handle);
5647 pkt->pkt_reason = CMD_INCOMPLETE;
5648 return;
5649 }
5650
5651 switch (task_type) {
5652 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5653 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5654 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5655 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5656 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5657 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5658 break;
5659 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5660 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5661 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5662 /*
5663 * Check for invalid DevHandle of 0 in case application
5664 * sends bad command. DevHandle of 0 could cause problems.
5665 */
5666 if (dev_handle == 0) {
5667 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5668 " DevHandle of 0.");
5669 } else {
5670 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5671 task_type);
5672 }
5673 break;
5674 default:
5675 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5676 task_type);
5677 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5678 break;
5679 }
5680 }
5681
/*
 * Body of one doneq helper thread.  Pulls completed commands off this
 * thread's private done queue and invokes their completion callbacks
 * outside the per-thread list mutex.  Runs until the thread's
 * MPTSAS_DONEQ_THREAD_ACTIVE flag is cleared.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t		*mpt = arg->mpt;
	uint64_t		t = arg->t;
	mptsas_cmd_t		*cmd;
	struct scsi_pkt		*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		/*
		 * Sleep until work arrives or we're signalled to exit.
		 * A spurious wakeup is harmless: the rm call below
		 * simply returns NULL when the queue is empty.
		 */
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Drop the list mutex across the completion callback;
		 * the target driver's callback may run for an arbitrary
		 * time and may re-enter this driver.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/*
	 * Thread is going away: decrement the live-thread count and wake
	 * anyone waiting on m_doneq_thread_cv for the count to change.
	 */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5713
5714
5715 /*
5716 * mpt interrupt handler.
5717 */
5718 static uint_t
5719 mptsas_intr(caddr_t arg1, caddr_t arg2)
5720 {
5721 mptsas_t *mpt = (void *)arg1;
5722 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5723 uchar_t did_reply = FALSE;
5724
5725 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5726
5727 mutex_enter(&mpt->m_mutex);
5728
5729 /*
5730 * If interrupts are shared by two channels then check whether this
5731 * interrupt is genuinely for this channel by making sure first the
5732 * chip is in high power state.
5733 */
5734 if ((mpt->m_options & MPTSAS_OPT_PM) &&
5735 (mpt->m_power_level != PM_LEVEL_D0)) {
5736 mutex_exit(&mpt->m_mutex);
5737 return (DDI_INTR_UNCLAIMED);
5738 }
5739
5740 /*
5741 * If polling, interrupt was triggered by some shared interrupt because
5742 * IOC interrupts are disabled during polling, so polling routine will
5743 * handle any replies. Considering this, if polling is happening,
5744 * return with interrupt unclaimed.
5745 */
5746 if (mpt->m_polled_intr) {
5747 mutex_exit(&mpt->m_mutex);
5748 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5749 return (DDI_INTR_UNCLAIMED);
5750 }
5751
5752 /*
5753 * Read the istat register.
5754 */
5755 if ((INTPENDING(mpt)) != 0) {
5756 /*
5757 * read fifo until empty.
5758 */
5759 #ifndef __lock_lint
5760 _NOTE(CONSTCOND)
5761 #endif
5762 while (TRUE) {
5763 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5764 DDI_DMA_SYNC_FORCPU);
5765 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5766 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5767
5768 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5769 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5770 ddi_get32(mpt->m_acc_post_queue_hdl,
5771 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5772 break;
5773 }
5774
5775 /*
5776 * The reply is valid, process it according to its
5777 * type. Also, set a flag for updating the reply index
5778 * after they've all been processed.
5779 */
5780 did_reply = TRUE;
5781
5782 mptsas_process_intr(mpt, reply_desc_union);
5783
5784 /*
5785 * Increment post index and roll over if needed.
5786 */
5787 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5788 mpt->m_post_index = 0;
5789 }
5790 }
5791
5792 /*
5793 * Update the global reply index if at least one reply was
5794 * processed.
5795 */
5796 if (did_reply) {
5797 ddi_put32(mpt->m_datap,
5798 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5799 }
5800 } else {
5801 mutex_exit(&mpt->m_mutex);
5802 return (DDI_INTR_UNCLAIMED);
5803 }
5804 NDBG1(("mptsas_intr complete"));
5805
5806 /*
5807 * If no helper threads are created, process the doneq in ISR. If
5808 * helpers are created, use the doneq length as a metric to measure the
5809 * load on the interrupt CPU. If it is long enough, which indicates the
5810 * load is heavy, then we deliver the IO completions to the helpers.
5811 * This measurement has some limitations, although it is simple and
5812 * straightforward and works well for most of the cases at present.
5813 */
5814 if (!mpt->m_doneq_thread_n ||
5815 (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5816 mptsas_doneq_empty(mpt);
5817 } else {
5818 mptsas_deliver_doneq_thread(mpt);
5819 }
5820
5821 /*
5822 * If there are queued cmd, start them now.
5823 */
5824 if (mpt->m_waitq != NULL) {
5825 mptsas_restart_waitq(mpt);
5826 }
5827
5828 mutex_exit(&mpt->m_mutex);
5829 return (DDI_INTR_CLAIMED);
5830 }
5831
/*
 * Dispatch one reply descriptor by its type: fast-path SCSI IO success,
 * address reply (full reply frame), or unknown (logged and dropped).
 * Afterwards the descriptor slot is reset to the all-ones sentinel that
 * mptsas_intr() uses to detect an empty queue.
 */
static void
mptsas_process_intr(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc_union)
{
	uint8_t	reply_type;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * The reply is valid, process it according to its
	 * type. Also, set a flag for updating the reply index
	 * after they've all been processed.
	 */
	reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
	    &reply_desc_union->Default.ReplyFlags);
	reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
	    reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
		mptsas_handle_scsi_io_success(mpt, reply_desc_union);
	} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
		mptsas_handle_address_reply(mpt, reply_desc_union);
	} else {
		mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}

	/*
	 * Clear the reply descriptor for re-use (all-ones is the empty
	 * sentinel) and sync it back to the device before the host index
	 * is advanced.
	 */
	ddi_put64(mpt->m_acc_post_queue_hdl,
	    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
	    0xFFFFFFFFFFFFFFFF);
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
}
5868
5869 /*
5870 * handle qfull condition
5871 */
5872 static void
5873 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5874 {
5875 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5876
5877 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5878 (ptgt->m_qfull_retries == 0)) {
5879 /*
5880 * We have exhausted the retries on QFULL, or,
5881 * the target driver has indicated that it
5882 * wants to handle QFULL itself by setting
5883 * qfull-retries capability to 0. In either case
5884 * we want the target driver's QFULL handling
5885 * to kick in. We do this by having pkt_reason
5886 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5887 */
5888 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5889 } else {
5890 if (ptgt->m_reset_delay == 0) {
5891 ptgt->m_t_throttle =
5892 max((ptgt->m_t_ncmds - 2), 0);
5893 }
5894
5895 cmd->cmd_pkt_flags |= FLAG_HEAD;
5896 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5897 cmd->cmd_flags |= CFLAG_RETRY;
5898
5899 (void) mptsas_accept_pkt(mpt, cmd);
5900
5901 /*
5902 * when target gives queue full status with no commands
5903 * outstanding (m_t_ncmds == 0), throttle is set to 0
5904 * (HOLD_THROTTLE), and the queue full handling start
5905 * (see psarc/1994/313); if there are commands outstanding,
5906 * throttle is set to (m_t_ncmds - 2)
5907 */
5908 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5909 /*
5910 * By setting throttle to QFULL_THROTTLE, we
5911 * avoid submitting new commands and in
5912 * mptsas_restart_cmd find out slots which need
5913 * their throttles to be cleared.
5914 */
5915 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5916 if (mpt->m_restart_cmd_timeid == 0) {
5917 mpt->m_restart_cmd_timeid =
5918 timeout(mptsas_restart_cmd, mpt,
5919 ptgt->m_qfull_retry_interval);
5920 }
5921 }
5922 }
5923 }
5924
5925 mptsas_phymask_t
5926 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5927 {
5928 mptsas_phymask_t phy_mask = 0;
5929 uint8_t i = 0;
5930
5931 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5932
5933 ASSERT(mutex_owned(&mpt->m_mutex));
5934
5935 /*
5936 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5937 */
5938 if (physport == 0xFF) {
5939 return (0);
5940 }
5941
5942 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5943 if (mpt->m_phy_info[i].attached_devhdl &&
5944 (mpt->m_phy_info[i].phy_mask != 0) &&
5945 (mpt->m_phy_info[i].port_num == physport)) {
5946 phy_mask = mpt->m_phy_info[i].phy_mask;
5947 break;
5948 }
5949 }
5950 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5951 mpt->m_instance, physport, phy_mask));
5952 return (phy_mask);
5953 }
5954
5955 /*
5956 * mpt free device handle after device gone, by use of passthrough
5957 */
5958 static int
5959 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5960 {
5961 Mpi2SasIoUnitControlRequest_t req;
5962 Mpi2SasIoUnitControlReply_t rep;
5963 int ret;
5964
5965 ASSERT(mutex_owned(&mpt->m_mutex));
5966
5967 /*
5968 * Need to compose a SAS IO Unit Control request message
5969 * and call mptsas_do_passthru() function
5970 */
5971 bzero(&req, sizeof (req));
5972 bzero(&rep, sizeof (rep));
5973
5974 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5975 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5976 req.DevHandle = LE_16(devhdl);
5977
5978 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5979 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5980 if (ret != 0) {
5981 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5982 "Control error %d", ret);
5983 return (DDI_FAILURE);
5984 }
5985
5986 /* do passthrough success, check the ioc status */
5987 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5988 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5989 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5990 return (DDI_FAILURE);
5991 }
5992
5993 return (DDI_SUCCESS);
5994 }
5995
5996 static void
5997 mptsas_update_phymask(mptsas_t *mpt)
5998 {
5999 mptsas_phymask_t mask = 0, phy_mask;
6000 char *phy_mask_name;
6001 uint8_t current_port;
6002 int i, j;
6003
6004 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
6005
6006 ASSERT(mutex_owned(&mpt->m_mutex));
6007
6008 (void) mptsas_get_sas_io_unit_page(mpt);
6009
6010 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6011
6012 for (i = 0; i < mpt->m_num_phys; i++) {
6013 phy_mask = 0x00;
6014
6015 if (mpt->m_phy_info[i].attached_devhdl == 0)
6016 continue;
6017
6018 bzero(phy_mask_name, sizeof (phy_mask_name));
6019
6020 current_port = mpt->m_phy_info[i].port_num;
6021
6022 if ((mask & (1 << i)) != 0)
6023 continue;
6024
6025 for (j = 0; j < mpt->m_num_phys; j++) {
6026 if (mpt->m_phy_info[j].attached_devhdl &&
6027 (mpt->m_phy_info[j].port_num == current_port)) {
6028 phy_mask |= (1 << j);
6029 }
6030 }
6031 mask = mask | phy_mask;
6032
6033 for (j = 0; j < mpt->m_num_phys; j++) {
6034 if ((phy_mask >> j) & 0x01) {
6035 mpt->m_phy_info[j].phy_mask = phy_mask;
6036 }
6037 }
6038
6039 (void) sprintf(phy_mask_name, "%x", phy_mask);
6040
6041 mutex_exit(&mpt->m_mutex);
6042 /*
6043 * register a iport, if the port has already been existed
6044 * SCSA will do nothing and just return.
6045 */
6046 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
6047 mutex_enter(&mpt->m_mutex);
6048 }
6049 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6050 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
6051 }
6052
6053 /*
6054 * mptsas_handle_dr is a task handler for DR, the DR action includes:
6055 * 1. Directly attched Device Added/Removed.
6056 * 2. Expander Device Added/Removed.
6057 * 3. Indirectly Attached Device Added/Expander.
6058 * 4. LUNs of a existing device status change.
6059 * 5. RAID volume created/deleted.
6060 * 6. Member of RAID volume is released because of RAID deletion.
6061 * 7. Physical disks are removed because of RAID creation.
6062 */
6063 static void
6064 mptsas_handle_dr(void *args) {
6065 mptsas_topo_change_list_t *topo_node = NULL;
6066 mptsas_topo_change_list_t *save_node = NULL;
6067 mptsas_t *mpt;
6068 dev_info_t *parent = NULL;
6069 mptsas_phymask_t phymask = 0;
6070 char *phy_mask_name;
6071 uint8_t flags = 0, physport = 0xff;
6072 uint8_t port_update = 0;
6073 uint_t event;
6074
6075 topo_node = (mptsas_topo_change_list_t *)args;
6076
6077 mpt = topo_node->mpt;
6078 event = topo_node->event;
6079 flags = topo_node->flags;
6080
6081 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6082
6083 NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));
6084
6085 switch (event) {
6086 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6087 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6088 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
6089 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6090 /*
6091 * Direct attached or expander attached device added
6092 * into system or a Phys Disk that is being unhidden.
6093 */
6094 port_update = 1;
6095 }
6096 break;
6097 case MPTSAS_DR_EVENT_RECONFIG_SMP:
6098 /*
6099 * New expander added into system, it must be the head
6100 * of topo_change_list_t
6101 */
6102 port_update = 1;
6103 break;
6104 default:
6105 port_update = 0;
6106 break;
6107 }
6108 /*
6109 * All cases port_update == 1 may cause initiator port form change
6110 */
6111 mutex_enter(&mpt->m_mutex);
6112 if (mpt->m_port_chng && port_update) {
6113 /*
6114 * mpt->m_port_chng flag indicates some PHYs of initiator
6115 * port have changed to online. So when expander added or
6116 * directly attached device online event come, we force to
6117 * update port information by issueing SAS IO Unit Page and
6118 * update PHYMASKs.
6119 */
6120 (void) mptsas_update_phymask(mpt);
6121 mpt->m_port_chng = 0;
6122
6123 }
6124 mutex_exit(&mpt->m_mutex);
6125 while (topo_node) {
6126 phymask = 0;
6127 if (parent == NULL) {
6128 physport = topo_node->un.physport;
6129 event = topo_node->event;
6130 flags = topo_node->flags;
6131 if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
6132 MPTSAS_DR_EVENT_OFFLINE_SMP)) {
6133 /*
6134 * For all offline events, phymask is known
6135 */
6136 phymask = topo_node->un.phymask;
6137 goto find_parent;
6138 }
6139 if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6140 goto handle_topo_change;
6141 }
6142 if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
6143 phymask = topo_node->un.phymask;
6144 goto find_parent;
6145 }
6146
6147 if ((flags ==
6148 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
6149 (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
6150 /*
6151 * There is no any field in IR_CONFIG_CHANGE
6152 * event indicate physport/phynum, let's get
6153 * parent after SAS Device Page0 request.
6154 */
6155 goto handle_topo_change;
6156 }
6157
6158 mutex_enter(&mpt->m_mutex);
6159 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6160 /*
6161 * If the direct attached device added or a
6162 * phys disk is being unhidden, argument
6163 * physport actually is PHY#, so we have to get
6164 * phymask according PHY#.
6165 */
6166 physport = mpt->m_phy_info[physport].port_num;
6167 }
6168
6169 /*
6170 * Translate physport to phymask so that we can search
6171 * parent dip.
6172 */
6173 phymask = mptsas_physport_to_phymask(mpt,
6174 physport);
6175 mutex_exit(&mpt->m_mutex);
6176
6177 find_parent:
6178 bzero(phy_mask_name, MPTSAS_MAX_PHYS);
6179 /*
6180 * For RAID topology change node, write the iport name
6181 * as v0.
6182 */
6183 if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6184 (void) sprintf(phy_mask_name, "v0");
6185 } else {
6186 /*
6187 * phymask can bo 0 if the drive has been
6188 * pulled by the time an add event is
6189 * processed. If phymask is 0, just skip this
6190 * event and continue.
6191 */
6192 if (phymask == 0) {
6193 mutex_enter(&mpt->m_mutex);
6194 save_node = topo_node;
6195 topo_node = topo_node->next;
6196 ASSERT(save_node);
6197 kmem_free(save_node,
6198 sizeof (mptsas_topo_change_list_t));
6199 mutex_exit(&mpt->m_mutex);
6200
6201 parent = NULL;
6202 continue;
6203 }
6204 (void) sprintf(phy_mask_name, "%x", phymask);
6205 }
6206 parent = scsi_hba_iport_find(mpt->m_dip,
6207 phy_mask_name);
6208 if (parent == NULL) {
6209 mptsas_log(mpt, CE_WARN, "Failed to find an "
6210 "iport, should not happen!");
6211 goto out;
6212 }
6213
6214 }
6215 ASSERT(parent);
6216 handle_topo_change:
6217
6218 mutex_enter(&mpt->m_mutex);
6219 /*
6220 * If HBA is being reset, don't perform operations depending
6221 * on the IOC. We must free the topo list, however.
6222 */
6223 if (!mpt->m_in_reset)
6224 mptsas_handle_topo_change(topo_node, parent);
6225 else
6226 NDBG20(("skipping topo change received during reset"));
6227 save_node = topo_node;
6228 topo_node = topo_node->next;
6229 ASSERT(save_node);
6230 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
6231 mutex_exit(&mpt->m_mutex);
6232
6233 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6234 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
6235 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
6236 /*
6237 * If direct attached device associated, make sure
6238 * reset the parent before start the next one. But
6239 * all devices associated with expander shares the
6240 * parent. Also, reset parent if this is for RAID.
6241 */
6242 parent = NULL;
6243 }
6244 }
6245 out:
6246 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6247 }
6248
/*
 * mptsas_handle_topo_change --
 *	DR taskq worker: apply a single queued topology-change event
 *	(target/SMP online or offline, or bare dev-handle removal) to the
 *	device tree under 'parent'.
 *
 *	topo_node	the queued event; carries the mpt softstate, event
 *			type, device handle, association flags and an
 *			optional cached object pointer.
 *	parent		iport dev_info node under which children are
 *			(un)configured; re-resolved locally for RAID
 *			phys-disk events.
 *
 *	Entered with mpt->m_mutex held (asserted below); the mutex is
 *	dropped around ndi/scsi framework calls that may sleep and
 *	re-acquired before the normal 'break' exits.
 */
static void
mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
    dev_info_t *parent)
{
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp = NULL;
	mptsas_t	*mpt = (void *)topo_node->mpt;
	uint16_t	devhdl;
	uint16_t	attached_devhdl;
	uint64_t	sas_wwn = 0;
	int		rval = 0;
	uint32_t	page_address;
	uint8_t		phy, flags;
	char		*addr = NULL;
	dev_info_t	*lundip;
	int		circ = 0, circ1 = 0;
	char		attached_wwnstr[MPTSAS_WWN_STRLEN];

	NDBG20(("mptsas%d handle_topo_change enter, devhdl 0x%x,"
	    "event 0x%x, flags 0x%x", mpt->m_instance, topo_node->devhdl,
	    topo_node->event, topo_node->flags));

	ASSERT(mutex_owned(&mpt->m_mutex));

	switch (topo_node->event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
	{
		char *phy_mask_name;
		mptsas_phymask_t phymask = 0;

		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/*
			 * Get latest RAID info.
			 */
			(void) mptsas_get_raid_info(mpt);
			ptgt = refhash_linear_search(mpt->m_targets,
			    mptsas_target_eval_devhdl, &topo_node->devhdl);
			if (ptgt == NULL)
				break;
		} else {
			/* Non-RAID events may carry the target pointer. */
			ptgt = (void *)topo_node->object;
		}

		if (ptgt == NULL) {
			/*
			 * If a Phys Disk was deleted, RAID info needs to be
			 * updated to reflect the new topology.
			 */
			(void) mptsas_get_raid_info(mpt);

			/*
			 * Get sas device page 0 by DevHandle to make sure if
			 * SSP/SATA end device exist.
			 */
			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
			    topo_node->devhdl;

			rval = mptsas_get_target_device_info(mpt, page_address,
			    &devhdl, &ptgt);
			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: target %d is "
				    "not a SAS/SATA device. \n",
				    topo_node->devhdl);
			} else if (rval == DEV_INFO_FAIL_ALLOC) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "allocate memory. \n");
			}
			/*
			 * If rval is DEV_INFO_PHYS_DISK than there is nothing
			 * else to do, just leave.
			 * (m_mutex is still held on this return path.)
			 */
			if (rval != DEV_INFO_SUCCESS) {
				return;
			}
		}

		ASSERT(ptgt->m_devhdl == topo_node->devhdl);

		/*
		 * Drop m_mutex across the (possibly sleeping) bus-config
		 * calls below; re-acquired before the 'break'.
		 */
		mutex_exit(&mpt->m_mutex);
		flags = topo_node->flags;

		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
			/*
			 * Phys disk behind a RAID volume: the caller's
			 * 'parent' does not apply, so look up the iport by
			 * the target's phymask instead.
			 */
			phymask = ptgt->m_addr.mta_phymask;
			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
			(void) sprintf(phy_mask_name, "%x", phymask);
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find a "
				    "iport for PD, should not happen!");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			ndi_devi_enter(parent, &circ1);
			(void) mptsas_config_raid(parent, topo_node->devhdl,
			    &lundip);
			ndi_devi_exit(parent, circ1);
		} else {
			/*
			 * hold nexus for bus configure
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ);
			ndi_devi_enter(parent, &circ1);
			rval = mptsas_config_target(parent, ptgt);
			/*
			 * release nexus for bus configure
			 */
			ndi_devi_exit(parent, circ1);
			ndi_devi_exit(scsi_vhci_dip, circ);

			/*
			 * Add parent's props for SMHBA support
			 */
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				bzero(attached_wwnstr,
				    sizeof (attached_wwnstr));
				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
				    ptgt->m_addr.mta_wwn);
				if (ddi_prop_update_string(DDI_DEV_T_NONE,
				    parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT,
				    attached_wwnstr)
				    != DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent,
					    SCSI_ADDR_PROP_ATTACHED_PORT);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    "attached-port props");
					/*
					 * NOTE(review): this returns with
					 * m_mutex dropped, while the 'break'
					 * paths re-acquire it first -- verify
					 * the caller tolerates both exit
					 * lock states.
					 */
					return;
				}
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS, 1) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_NUM_PHYS);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    " create num-phys props");
					/* NOTE(review): m_mutex not held. */
					return;
				}

				/*
				 * Update PHY info for smhba
				 */
				mutex_enter(&mpt->m_mutex);
				if (mptsas_smhba_phy_init(mpt)) {
					mutex_exit(&mpt->m_mutex);
					mptsas_log(mpt, CE_WARN, "mptsas phy"
					    " update failed");
					/* NOTE(review): m_mutex not held. */
					return;
				}
				mutex_exit(&mpt->m_mutex);

				/*
				 * topo_node->un.physport is really the PHY#
				 * for direct attached devices
				 */
				mptsas_smhba_set_one_phy_props(mpt, parent,
				    topo_node->un.physport, &attached_devhdl);

				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT, 0) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_VIRTUAL_PORT);
					mptsas_log(mpt, CE_WARN,
					    "mptsas virtual-port"
					    "port prop update failed");
					/* NOTE(review): m_mutex not held. */
					return;
				}
			}
		}
		mutex_enter(&mpt->m_mutex);

		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
		    ptgt->m_addr.mta_phymask));
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
	{
		devhdl = topo_node->devhdl;
		ptgt = refhash_linear_search(mpt->m_targets,
		    mptsas_target_eval_devhdl, &devhdl);
		if (ptgt == NULL)
			break;

		sas_wwn = ptgt->m_addr.mta_wwn;
		phy = ptgt->m_phynum;

		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

		/* Unit-address string: WWN if the target has one, else PHY#. */
		if (sas_wwn) {
			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
		} else {
			(void) sprintf(addr, "p%x", phy);
		}
		ASSERT(ptgt->m_devhdl == devhdl);

		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
		    (topo_node->flags ==
		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Get latest RAID info if RAID volume status changes
			 * or Phys Disk status changes
			 */
			(void) mptsas_get_raid_info(mpt);
		}
		/*
		 * Abort all outstanding command on the device
		 */
		rval = mptsas_do_scsi_reset(mpt, devhdl);
		if (rval) {
			NDBG20(("mptsas%d handle_topo_change to reset target "
			    "before offline devhdl:%x, phymask:%x, rval:%x",
			    mpt->m_instance, ptgt->m_devhdl,
			    ptgt->m_addr.mta_phymask, rval));
		}

		/* Drop m_mutex for the offline/prop framework calls. */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(scsi_vhci_dip, &circ);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_target(parent, addr);
		ndi_devi_exit(parent, circ1);
		ndi_devi_exit(scsi_vhci_dip, circ);
		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
		    "phymask:%x, rval:%x", mpt->m_instance,
		    ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));

		kmem_free(addr, SCSI_MAXNAMELEN);

		/*
		 * Clear parent's props for SMHBA support
		 */
		flags = topo_node->flags;
		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				/*
				 * NOTE(review): these 'break's leave the
				 * switch (and function) with m_mutex still
				 * dropped and also skip the target cleanup
				 * and devhdl free below -- confirm intended.
				 */
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				break;
			}
		}

		mutex_enter(&mpt->m_mutex);
		/* Turn off any LED state associated with the target. */
		ptgt->m_led_status = 0;
		(void) mptsas_flush_led_status(mpt, ptgt);
		if (rval == DDI_SUCCESS) {
			refhash_remove(mpt->m_targets, ptgt);
			ptgt = NULL;
		} else {
			/*
			 * clean DR_INTRANSITION flag to allow I/O down to
			 * PHCI driver since failover finished.
			 * Invalidate the devhdl
			 */
			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
			ptgt->m_tgt_unconfigured = 0;
			mutex_enter(&mpt->m_tx_waitq_mutex);
			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
			mutex_exit(&mpt->m_tx_waitq_mutex);
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
			rval = mptsas_free_devhdl(mpt, devhdl);

			NDBG20(("mptsas%d handle_topo_change to remove "
			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
			    rval));
		}

		break;
	}
	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
	{
		/*
		 * Device handle is known to the controller but the target
		 * was never hashed (e.g. pull/re-insert races); reset and
		 * free the handle without any device-tree work.
		 */
		devhdl = topo_node->devhdl;
		/*
		 * If this is the remove handle event, do a reset first.
		 */
		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
			rval = mptsas_do_scsi_reset(mpt, devhdl);
			if (rval) {
				NDBG20(("mpt%d reset target before remove "
				    "devhdl:%x, rval:%x", mpt->m_instance,
				    devhdl, rval));
			}
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		rval = mptsas_free_devhdl(mpt, devhdl);
		NDBG20(("mptsas%d handle_topo_change to remove "
		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
		    rval));
		break;
	}
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
	{
		mptsas_smp_t smp;
		dev_info_t *smpdip;

		devhdl = topo_node->devhdl;

		/* Fetch expander page 0 to populate the smp structure. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
		if (rval != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "failed to online smp, "
			    "handle %x", devhdl);
			/* m_mutex still held on this return path. */
			return;
		}

		psmp = mptsas_smp_alloc(mpt, &smp);
		if (psmp == NULL) {
			return;
		}

		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		(void) mptsas_online_smp(parent, psmp, &smpdip);
		ndi_devi_exit(parent, circ1);

		mutex_enter(&mpt->m_mutex);
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_SMP:
	{
		devhdl = topo_node->devhdl;
		uint32_t dev_info;

		psmp = refhash_linear_search(mpt->m_smp_targets,
		    mptsas_smp_eval_devhdl, &devhdl);
		if (psmp == NULL)
			break;
		/*
		 * The mptsas_smp_t data is released only if the dip is offlined
		 * successfully.
		 */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
		ndi_devi_exit(parent, circ1);

		dev_info = psmp->m_deviceinfo;
		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
		    DEVINFO_DIRECT_ATTACHED) {
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				/*
				 * NOTE(review): returns with m_mutex dropped
				 * and skips the smp hash cleanup below --
				 * verify the caller tolerates this.
				 */
				return;
			}
			/*
			 * Check whether the smp connected to the iport,
			 */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys"
				    "prop update failed");
				/* NOTE(review): m_mutex not held. */
				return;
			}
			/*
			 * Clear parent's attached-port props
			 */
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				/* NOTE(review): m_mutex not held. */
				return;
			}
		}

		mutex_enter(&mpt->m_mutex);
		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
		if (rval == DDI_SUCCESS) {
			refhash_remove(mpt->m_smp_targets, psmp);
		} else {
			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		}

		bzero(attached_wwnstr, sizeof (attached_wwnstr));

		break;
	}
	default:
		/* Unknown event: nothing to do (m_mutex still held). */
		return;
	}
}
6683
6684 /*
6685 * Record the event if its type is enabled in mpt instance by ioctl.
6686 */
6687 static void
6688 mptsas_record_event(void *args)
6689 {
6690 m_replyh_arg_t *replyh_arg;
6691 pMpi2EventNotificationReply_t eventreply;
6692 uint32_t event, rfm;
6693 mptsas_t *mpt;
6694 int i, j;
6695 uint16_t event_data_len;
6696 boolean_t sendAEN = FALSE;
6697
6698 replyh_arg = (m_replyh_arg_t *)args;
6699 rfm = replyh_arg->rfm;
6700 mpt = replyh_arg->mpt;
6701
6702 eventreply = (pMpi2EventNotificationReply_t)
6703 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6704 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6705
6706
6707 /*
6708 * Generate a system event to let anyone who cares know that a
6709 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6710 * event mask is set to.
6711 */
6712 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6713 sendAEN = TRUE;
6714 }
6715
6716 /*
6717 * Record the event only if it is not masked. Determine which dword
6718 * and bit of event mask to test.
6719 */
6720 i = (uint8_t)(event / 32);
6721 j = (uint8_t)(event % 32);
6722 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6723 i = mpt->m_event_index;
6724 mpt->m_events[i].Type = event;
6725 mpt->m_events[i].Number = ++mpt->m_event_number;
6726 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6727 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6728 &eventreply->EventDataLength);
6729
6730 if (event_data_len > 0) {
6731 /*
6732 * Limit data to size in m_event entry
6733 */
6734 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6735 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6736 }
6737 for (j = 0; j < event_data_len; j++) {
6738 mpt->m_events[i].Data[j] =
6739 ddi_get32(mpt->m_acc_reply_frame_hdl,
6740 &(eventreply->EventData[j]));
6741 }
6742
6743 /*
6744 * check for index wrap-around
6745 */
6746 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6747 i = 0;
6748 }
6749 mpt->m_event_index = (uint8_t)i;
6750
6751 /*
6752 * Set flag to send the event.
6753 */
6754 sendAEN = TRUE;
6755 }
6756 }
6757
6758 /*
6759 * Generate a system event if flag is set to let anyone who cares know
6760 * that an event has occurred.
6761 */
6762 if (sendAEN) {
6763 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6764 "SAS", NULL, NULL, DDI_NOSLEEP);
6765 }
6766 }
6767
6768 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6769 /*
6770 * handle sync events from ioc in interrupt
6771 * return value:
6772 * DDI_SUCCESS: The event is handled by this func
6773 * DDI_FAILURE: Event is not handled
6774 */
6775 static int
6776 mptsas_handle_event_sync(void *args)
6777 {
6778 m_replyh_arg_t *replyh_arg;
6779 pMpi2EventNotificationReply_t eventreply;
6780 uint32_t event, rfm;
6781 mptsas_t *mpt;
6782 uint_t iocstatus;
6783
6784 replyh_arg = (m_replyh_arg_t *)args;
6785 rfm = replyh_arg->rfm;
6786 mpt = replyh_arg->mpt;
6787
6788 ASSERT(mutex_owned(&mpt->m_mutex));
6789
6790 eventreply = (pMpi2EventNotificationReply_t)
6791 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6792 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6793
6794 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6795 &eventreply->IOCStatus)) {
6796 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6797 mptsas_log(mpt, CE_WARN,
6798 "!mptsas_handle_event_sync: event 0x%x, "
6799 "IOCStatus=0x%x, "
6800 "IOCLogInfo=0x%x", event, iocstatus,
6801 ddi_get32(mpt->m_acc_reply_frame_hdl,
6802 &eventreply->IOCLogInfo));
6803 } else {
6804 mptsas_log(mpt, CE_WARN,
6805 "mptsas_handle_event_sync: event 0x%x, "
6806 "IOCStatus=0x%x, "
6807 "(IOCLogInfo=0x%x)", event, iocstatus,
6808 ddi_get32(mpt->m_acc_reply_frame_hdl,
6809 &eventreply->IOCLogInfo));
6810 }
6811 }
6812
6813 /*
6814 * figure out what kind of event we got and handle accordingly
6815 */
6816 switch (event) {
6817 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6818 {
6819 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
6820 uint8_t num_entries, expstatus, phy;
6821 uint8_t phystatus, physport, state, i;
6822 uint8_t start_phy_num, link_rate;
6823 uint16_t dev_handle, reason_code;
6824 uint16_t enc_handle, expd_handle;
6825 char string[80], curr[80], prev[80];
6826 mptsas_topo_change_list_t *topo_head = NULL;
6827 mptsas_topo_change_list_t *topo_tail = NULL;
6828 mptsas_topo_change_list_t *topo_node = NULL;
6829 mptsas_target_t *ptgt;
6830 mptsas_smp_t *psmp;
6831 uint8_t flags = 0, exp_flag;
6832 smhba_info_t *pSmhba = NULL;
6833
6834 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6835
6836 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6837 eventreply->EventData;
6838
6839 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6840 &sas_topo_change_list->EnclosureHandle);
6841 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6842 &sas_topo_change_list->ExpanderDevHandle);
6843 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6844 &sas_topo_change_list->NumEntries);
6845 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6846 &sas_topo_change_list->StartPhyNum);
6847 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6848 &sas_topo_change_list->ExpStatus);
6849 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6850 &sas_topo_change_list->PhysicalPort);
6851
6852 string[0] = 0;
6853 if (expd_handle) {
6854 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6855 switch (expstatus) {
6856 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6857 (void) sprintf(string, " added");
6858 /*
6859 * New expander device added
6860 */
6861 mpt->m_port_chng = 1;
6862 topo_node = kmem_zalloc(
6863 sizeof (mptsas_topo_change_list_t),
6864 KM_SLEEP);
6865 topo_node->mpt = mpt;
6866 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6867 topo_node->un.physport = physport;
6868 topo_node->devhdl = expd_handle;
6869 topo_node->flags = flags;
6870 topo_node->object = NULL;
6871 if (topo_head == NULL) {
6872 topo_head = topo_tail = topo_node;
6873 } else {
6874 topo_tail->next = topo_node;
6875 topo_tail = topo_node;
6876 }
6877 break;
6878 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6879 (void) sprintf(string, " not responding, "
6880 "removed");
6881 psmp = refhash_linear_search(mpt->m_smp_targets,
6882 mptsas_smp_eval_devhdl, &expd_handle);
6883 if (psmp == NULL)
6884 break;
6885
6886 topo_node = kmem_zalloc(
6887 sizeof (mptsas_topo_change_list_t),
6888 KM_SLEEP);
6889 topo_node->mpt = mpt;
6890 topo_node->un.phymask =
6891 psmp->m_addr.mta_phymask;
6892 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6893 topo_node->devhdl = expd_handle;
6894 topo_node->flags = flags;
6895 topo_node->object = NULL;
6896 if (topo_head == NULL) {
6897 topo_head = topo_tail = topo_node;
6898 } else {
6899 topo_tail->next = topo_node;
6900 topo_tail = topo_node;
6901 }
6902 break;
6903 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6904 break;
6905 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6906 (void) sprintf(string, " not responding, "
6907 "delaying removal");
6908 break;
6909 default:
6910 break;
6911 }
6912 } else {
6913 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6914 }
6915
6916 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6917 enc_handle, expd_handle, string));
6918 for (i = 0; i < num_entries; i++) {
6919 phy = i + start_phy_num;
6920 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6921 &sas_topo_change_list->PHY[i].PhyStatus);
6922 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6923 &sas_topo_change_list->PHY[i].AttachedDevHandle);
6924 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
6925 /*
6926 * Filter out processing of Phy Vacant Status unless
6927 * the reason code is "Not Responding". Process all
6928 * other combinations of Phy Status and Reason Codes.
6929 */
6930 if ((phystatus &
6931 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
6932 (reason_code !=
6933 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
6934 continue;
6935 }
6936 curr[0] = 0;
6937 prev[0] = 0;
6938 string[0] = 0;
6939 switch (reason_code) {
6940 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6941 {
6942 NDBG20(("mptsas%d phy %d physical_port %d "
6943 "dev_handle %d added", mpt->m_instance, phy,
6944 physport, dev_handle));
6945 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6946 &sas_topo_change_list->PHY[i].LinkRate);
6947 state = (link_rate &
6948 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6949 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6950 switch (state) {
6951 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6952 (void) sprintf(curr, "is disabled");
6953 break;
6954 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6955 (void) sprintf(curr, "is offline, "
6956 "failed speed negotiation");
6957 break;
6958 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6959 (void) sprintf(curr, "SATA OOB "
6960 "complete");
6961 break;
6962 case SMP_RESET_IN_PROGRESS:
6963 (void) sprintf(curr, "SMP reset in "
6964 "progress");
6965 break;
6966 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6967 (void) sprintf(curr, "is online at "
6968 "1.5 Gbps");
6969 break;
6970 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6971 (void) sprintf(curr, "is online at 3.0 "
6972 "Gbps");
6973 break;
6974 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6975 (void) sprintf(curr, "is online at 6.0 "
6976 "Gbps");
6977 break;
6978 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
6979 (void) sprintf(curr,
6980 "is online at 12.0 Gbps");
6981 break;
6982 default:
6983 (void) sprintf(curr, "state is "
6984 "unknown");
6985 break;
6986 }
6987 /*
6988 * New target device added into the system.
6989 * Set association flag according to if an
6990 * expander is used or not.
6991 */
6992 exp_flag =
6993 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6994 if (flags ==
6995 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6996 flags = exp_flag;
6997 }
6998 topo_node = kmem_zalloc(
6999 sizeof (mptsas_topo_change_list_t),
7000 KM_SLEEP);
7001 topo_node->mpt = mpt;
7002 topo_node->event =
7003 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7004 if (expd_handle == 0) {
7005 /*
7006 * Per MPI 2, if expander dev handle
7007 * is 0, it's a directly attached
7008 * device. So driver use PHY to decide
7009 * which iport is associated
7010 */
7011 physport = phy;
7012 mpt->m_port_chng = 1;
7013 }
7014 topo_node->un.physport = physport;
7015 topo_node->devhdl = dev_handle;
7016 topo_node->flags = flags;
7017 topo_node->object = NULL;
7018 if (topo_head == NULL) {
7019 topo_head = topo_tail = topo_node;
7020 } else {
7021 topo_tail->next = topo_node;
7022 topo_tail = topo_node;
7023 }
7024 break;
7025 }
7026 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7027 {
7028 NDBG20(("mptsas%d phy %d physical_port %d "
7029 "dev_handle %d removed", mpt->m_instance,
7030 phy, physport, dev_handle));
7031 /*
7032 * Set association flag according to if an
7033 * expander is used or not.
7034 */
7035 exp_flag =
7036 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7037 if (flags ==
7038 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7039 flags = exp_flag;
7040 }
7041 /*
7042 * Target device is removed from the system
7043 * Before the device is really offline from
7044 * from system.
7045 */
7046 ptgt = refhash_linear_search(mpt->m_targets,
7047 mptsas_target_eval_devhdl, &dev_handle);
7048 /*
7049 * If ptgt is NULL here, it means that the
7050 * DevHandle is not in the hash table. This is
7051 * reasonable sometimes. For example, if a
7052 * disk was pulled, then added, then pulled
7053 * again, the disk will not have been put into
7054 * the hash table because the add event will
7055 * have an invalid phymask. BUT, this does not
7056 * mean that the DevHandle is invalid. The
7057 * controller will still have a valid DevHandle
7058 * that must be removed. To do this, use the
7059 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
7060 */
7061 if (ptgt == NULL) {
7062 topo_node = kmem_zalloc(
7063 sizeof (mptsas_topo_change_list_t),
7064 KM_SLEEP);
7065 topo_node->mpt = mpt;
7066 topo_node->un.phymask = 0;
7067 topo_node->event =
7068 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7069 topo_node->devhdl = dev_handle;
7070 topo_node->flags = flags;
7071 topo_node->object = NULL;
7072 if (topo_head == NULL) {
7073 topo_head = topo_tail =
7074 topo_node;
7075 } else {
7076 topo_tail->next = topo_node;
7077 topo_tail = topo_node;
7078 }
7079 break;
7080 }
7081
7082 /*
7083 * Update DR flag immediately avoid I/O failure
7084 * before failover finish. Pay attention to the
7085 * mutex protect, we need grab m_tx_waitq_mutex
7086 * during set m_dr_flag because we won't add
7087 * the following command into waitq, instead,
7088 * we need return TRAN_BUSY in the tran_start
7089 * context.
7090 */
7091 mutex_enter(&mpt->m_tx_waitq_mutex);
7092 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7093 mutex_exit(&mpt->m_tx_waitq_mutex);
7094
7095 topo_node = kmem_zalloc(
7096 sizeof (mptsas_topo_change_list_t),
7097 KM_SLEEP);
7098 topo_node->mpt = mpt;
7099 topo_node->un.phymask =
7100 ptgt->m_addr.mta_phymask;
7101 topo_node->event =
7102 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7103 topo_node->devhdl = dev_handle;
7104 topo_node->flags = flags;
7105 topo_node->object = NULL;
7106 if (topo_head == NULL) {
7107 topo_head = topo_tail = topo_node;
7108 } else {
7109 topo_tail->next = topo_node;
7110 topo_tail = topo_node;
7111 }
7112 break;
7113 }
7114 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7115 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7116 &sas_topo_change_list->PHY[i].LinkRate);
7117 state = (link_rate &
7118 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7119 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7120 pSmhba = &mpt->m_phy_info[i].smhba_info;
7121 pSmhba->negotiated_link_rate = state;
7122 switch (state) {
7123 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7124 (void) sprintf(curr, "is disabled");
7125 mptsas_smhba_log_sysevent(mpt,
7126 ESC_SAS_PHY_EVENT,
7127 SAS_PHY_REMOVE,
7128 &mpt->m_phy_info[i].smhba_info);
7129 mpt->m_phy_info[i].smhba_info.
7130 negotiated_link_rate
7131 = 0x1;
7132 break;
7133 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7134 (void) sprintf(curr, "is offline, "
7135 "failed speed negotiation");
7136 mptsas_smhba_log_sysevent(mpt,
7137 ESC_SAS_PHY_EVENT,
7138 SAS_PHY_OFFLINE,
7139 &mpt->m_phy_info[i].smhba_info);
7140 break;
7141 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7142 (void) sprintf(curr, "SATA OOB "
7143 "complete");
7144 break;
7145 case SMP_RESET_IN_PROGRESS:
7146 (void) sprintf(curr, "SMP reset in "
7147 "progress");
7148 break;
7149 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7150 (void) sprintf(curr, "is online at "
7151 "1.5 Gbps");
7152 if ((expd_handle == 0) &&
7153 (enc_handle == 1)) {
7154 mpt->m_port_chng = 1;
7155 }
7156 mptsas_smhba_log_sysevent(mpt,
7157 ESC_SAS_PHY_EVENT,
7158 SAS_PHY_ONLINE,
7159 &mpt->m_phy_info[i].smhba_info);
7160 break;
7161 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7162 (void) sprintf(curr, "is online at 3.0 "
7163 "Gbps");
7164 if ((expd_handle == 0) &&
7165 (enc_handle == 1)) {
7166 mpt->m_port_chng = 1;
7167 }
7168 mptsas_smhba_log_sysevent(mpt,
7169 ESC_SAS_PHY_EVENT,
7170 SAS_PHY_ONLINE,
7171 &mpt->m_phy_info[i].smhba_info);
7172 break;
7173 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7174 (void) sprintf(curr, "is online at "
7175 "6.0 Gbps");
7176 if ((expd_handle == 0) &&
7177 (enc_handle == 1)) {
7178 mpt->m_port_chng = 1;
7179 }
7180 mptsas_smhba_log_sysevent(mpt,
7181 ESC_SAS_PHY_EVENT,
7182 SAS_PHY_ONLINE,
7183 &mpt->m_phy_info[i].smhba_info);
7184 break;
7185 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7186 (void) sprintf(curr, "is online at "
7187 "12.0 Gbps");
7188 if ((expd_handle == 0) &&
7189 (enc_handle == 1)) {
7190 mpt->m_port_chng = 1;
7191 }
7192 mptsas_smhba_log_sysevent(mpt,
7193 ESC_SAS_PHY_EVENT,
7194 SAS_PHY_ONLINE,
7195 &mpt->m_phy_info[i].smhba_info);
7196 break;
7197 default:
7198 (void) sprintf(curr, "state is "
7199 "unknown");
7200 break;
7201 }
7202
7203 state = (link_rate &
7204 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
7205 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
7206 switch (state) {
7207 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7208 (void) sprintf(prev, ", was disabled");
7209 break;
7210 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7211 (void) sprintf(prev, ", was offline, "
7212 "failed speed negotiation");
7213 break;
7214 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7215 (void) sprintf(prev, ", was SATA OOB "
7216 "complete");
7217 break;
7218 case SMP_RESET_IN_PROGRESS:
7219 (void) sprintf(prev, ", was SMP reset "
7220 "in progress");
7221 break;
7222 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7223 (void) sprintf(prev, ", was online at "
7224 "1.5 Gbps");
7225 break;
7226 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7227 (void) sprintf(prev, ", was online at "
7228 "3.0 Gbps");
7229 break;
7230 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7231 (void) sprintf(prev, ", was online at "
7232 "6.0 Gbps");
7233 break;
7234 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7235 (void) sprintf(prev, ", was online at "
7236 "12.0 Gbps");
7237 break;
7238 default:
7239 break;
7240 }
7241 (void) sprintf(&string[strlen(string)], "link "
7242 "changed, ");
7243 break;
7244 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7245 continue;
7246 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7247 (void) sprintf(&string[strlen(string)],
7248 "target not responding, delaying "
7249 "removal");
7250 break;
7251 }
7252 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7253 mpt->m_instance, phy, dev_handle, string, curr,
7254 prev));
7255 }
7256 if (topo_head != NULL) {
7257 /*
7258 * Launch DR taskq to handle topology change
7259 */
7260 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7261 mptsas_handle_dr, (void *)topo_head,
7262 DDI_NOSLEEP)) != DDI_SUCCESS) {
7263 while (topo_head != NULL) {
7264 topo_node = topo_head;
7265 topo_head = topo_head->next;
7266 kmem_free(topo_node,
7267 sizeof (mptsas_topo_change_list_t));
7268 }
7269 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7270 "for handle SAS DR event failed. \n");
7271 }
7272 }
7273 break;
7274 }
7275 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7276 {
7277 Mpi2EventDataIrConfigChangeList_t *irChangeList;
7278 mptsas_topo_change_list_t *topo_head = NULL;
7279 mptsas_topo_change_list_t *topo_tail = NULL;
7280 mptsas_topo_change_list_t *topo_node = NULL;
7281 mptsas_target_t *ptgt;
7282 uint8_t num_entries, i, reason;
7283 uint16_t volhandle, diskhandle;
7284
7285 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7286 eventreply->EventData;
7287 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7288 &irChangeList->NumElements);
7289
7290 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7291 mpt->m_instance));
7292
7293 for (i = 0; i < num_entries; i++) {
7294 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7295 &irChangeList->ConfigElement[i].ReasonCode);
7296 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7297 &irChangeList->ConfigElement[i].VolDevHandle);
7298 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7299 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7300
7301 switch (reason) {
7302 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7303 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7304 {
7305 NDBG20(("mptsas %d volume added\n",
7306 mpt->m_instance));
7307
7308 topo_node = kmem_zalloc(
7309 sizeof (mptsas_topo_change_list_t),
7310 KM_SLEEP);
7311
7312 topo_node->mpt = mpt;
7313 topo_node->event =
7314 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7315 topo_node->un.physport = 0xff;
7316 topo_node->devhdl = volhandle;
7317 topo_node->flags =
7318 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7319 topo_node->object = NULL;
7320 if (topo_head == NULL) {
7321 topo_head = topo_tail = topo_node;
7322 } else {
7323 topo_tail->next = topo_node;
7324 topo_tail = topo_node;
7325 }
7326 break;
7327 }
7328 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7329 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7330 {
7331 NDBG20(("mptsas %d volume deleted\n",
7332 mpt->m_instance));
7333 ptgt = refhash_linear_search(mpt->m_targets,
7334 mptsas_target_eval_devhdl, &volhandle);
7335 if (ptgt == NULL)
7336 break;
7337
7338 /*
7339 * Clear any flags related to volume
7340 */
7341 (void) mptsas_delete_volume(mpt, volhandle);
7342
7343 /*
7344 * Update DR flag immediately avoid I/O failure
7345 */
7346 mutex_enter(&mpt->m_tx_waitq_mutex);
7347 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7348 mutex_exit(&mpt->m_tx_waitq_mutex);
7349
7350 topo_node = kmem_zalloc(
7351 sizeof (mptsas_topo_change_list_t),
7352 KM_SLEEP);
7353 topo_node->mpt = mpt;
7354 topo_node->un.phymask =
7355 ptgt->m_addr.mta_phymask;
7356 topo_node->event =
7357 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7358 topo_node->devhdl = volhandle;
7359 topo_node->flags =
7360 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7361 topo_node->object = (void *)ptgt;
7362 if (topo_head == NULL) {
7363 topo_head = topo_tail = topo_node;
7364 } else {
7365 topo_tail->next = topo_node;
7366 topo_tail = topo_node;
7367 }
7368 break;
7369 }
7370 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7371 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7372 {
7373 ptgt = refhash_linear_search(mpt->m_targets,
7374 mptsas_target_eval_devhdl, &diskhandle);
7375 if (ptgt == NULL)
7376 break;
7377
7378 /*
7379 * Update DR flag immediately avoid I/O failure
7380 */
7381 mutex_enter(&mpt->m_tx_waitq_mutex);
7382 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7383 mutex_exit(&mpt->m_tx_waitq_mutex);
7384
7385 topo_node = kmem_zalloc(
7386 sizeof (mptsas_topo_change_list_t),
7387 KM_SLEEP);
7388 topo_node->mpt = mpt;
7389 topo_node->un.phymask =
7390 ptgt->m_addr.mta_phymask;
7391 topo_node->event =
7392 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7393 topo_node->devhdl = diskhandle;
7394 topo_node->flags =
7395 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7396 topo_node->object = (void *)ptgt;
7397 if (topo_head == NULL) {
7398 topo_head = topo_tail = topo_node;
7399 } else {
7400 topo_tail->next = topo_node;
7401 topo_tail = topo_node;
7402 }
7403 break;
7404 }
7405 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7406 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7407 {
7408 /*
7409 * The physical drive is released by a IR
7410 * volume. But we cannot get the the physport
7411 * or phynum from the event data, so we only
7412 * can get the physport/phynum after SAS
7413 * Device Page0 request for the devhdl.
7414 */
7415 topo_node = kmem_zalloc(
7416 sizeof (mptsas_topo_change_list_t),
7417 KM_SLEEP);
7418 topo_node->mpt = mpt;
7419 topo_node->un.phymask = 0;
7420 topo_node->event =
7421 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7422 topo_node->devhdl = diskhandle;
7423 topo_node->flags =
7424 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7425 topo_node->object = NULL;
7426 mpt->m_port_chng = 1;
7427 if (topo_head == NULL) {
7428 topo_head = topo_tail = topo_node;
7429 } else {
7430 topo_tail->next = topo_node;
7431 topo_tail = topo_node;
7432 }
7433 break;
7434 }
7435 default:
7436 break;
7437 }
7438 }
7439
7440 if (topo_head != NULL) {
7441 /*
7442 * Launch DR taskq to handle topology change
7443 */
7444 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7445 mptsas_handle_dr, (void *)topo_head,
7446 DDI_NOSLEEP)) != DDI_SUCCESS) {
7447 while (topo_head != NULL) {
7448 topo_node = topo_head;
7449 topo_head = topo_head->next;
7450 kmem_free(topo_node,
7451 sizeof (mptsas_topo_change_list_t));
7452 }
7453 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7454 "for handle SAS DR event failed. \n");
7455 }
7456 }
7457 break;
7458 }
7459 default:
7460 return (DDI_FAILURE);
7461 }
7462
7463 return (DDI_SUCCESS);
7464 }
7465
7466 /*
7467 * handle events from ioc
7468 */
7469 static void
7470 mptsas_handle_event(void *args)
7471 {
7472 m_replyh_arg_t *replyh_arg;
7473 pMpi2EventNotificationReply_t eventreply;
7474 uint32_t event, iocloginfo, rfm;
7475 uint32_t status;
7476 uint8_t port;
7477 mptsas_t *mpt;
7478 uint_t iocstatus;
7479
7480 replyh_arg = (m_replyh_arg_t *)args;
7481 rfm = replyh_arg->rfm;
7482 mpt = replyh_arg->mpt;
7483
7484 mutex_enter(&mpt->m_mutex);
7485 /*
7486 * If HBA is being reset, drop incoming event.
7487 */
7488 if (mpt->m_in_reset) {
7489 NDBG20(("dropping event received prior to reset"));
7490 mutex_exit(&mpt->m_mutex);
7491 return;
7492 }
7493
7494 eventreply = (pMpi2EventNotificationReply_t)
7495 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7496 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7497
7498 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7499 &eventreply->IOCStatus)) {
7500 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7501 mptsas_log(mpt, CE_WARN,
7502 "!mptsas_handle_event: IOCStatus=0x%x, "
7503 "IOCLogInfo=0x%x", iocstatus,
7504 ddi_get32(mpt->m_acc_reply_frame_hdl,
7505 &eventreply->IOCLogInfo));
7506 } else {
7507 mptsas_log(mpt, CE_WARN,
7508 "mptsas_handle_event: IOCStatus=0x%x, "
7509 "IOCLogInfo=0x%x", iocstatus,
7510 ddi_get32(mpt->m_acc_reply_frame_hdl,
7511 &eventreply->IOCLogInfo));
7512 }
7513 }
7514
7515 /*
7516 * figure out what kind of event we got and handle accordingly
7517 */
7518 switch (event) {
7519 case MPI2_EVENT_LOG_ENTRY_ADDED:
7520 break;
7521 case MPI2_EVENT_LOG_DATA:
7522 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7523 &eventreply->IOCLogInfo);
7524 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7525 iocloginfo));
7526 break;
7527 case MPI2_EVENT_STATE_CHANGE:
7528 NDBG20(("mptsas%d state change.", mpt->m_instance));
7529 break;
7530 case MPI2_EVENT_HARD_RESET_RECEIVED:
7531 NDBG20(("mptsas%d event change.", mpt->m_instance));
7532 break;
7533 case MPI2_EVENT_SAS_DISCOVERY:
7534 {
7535 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7536 char string[80];
7537 uint8_t rc;
7538
7539 sasdiscovery =
7540 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7541
7542 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7543 &sasdiscovery->ReasonCode);
7544 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7545 &sasdiscovery->PhysicalPort);
7546 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7547 &sasdiscovery->DiscoveryStatus);
7548
7549 string[0] = 0;
7550 switch (rc) {
7551 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7552 (void) sprintf(string, "STARTING");
7553 break;
7554 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7555 (void) sprintf(string, "COMPLETED");
7556 break;
7557 default:
7558 (void) sprintf(string, "UNKNOWN");
7559 break;
7560 }
7561
7562 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7563 port, status));
7564
7565 break;
7566 }
7567 case MPI2_EVENT_EVENT_CHANGE:
7568 NDBG20(("mptsas%d event change.", mpt->m_instance));
7569 break;
7570 case MPI2_EVENT_TASK_SET_FULL:
7571 {
7572 pMpi2EventDataTaskSetFull_t taskfull;
7573
7574 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7575
7576 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7577 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7578 &taskfull->CurrentDepth)));
7579 break;
7580 }
7581 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7582 {
7583 /*
7584 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7585 * in mptsas_handle_event_sync() of interrupt context
7586 */
7587 break;
7588 }
7589 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7590 {
7591 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7592 uint8_t rc;
7593 char string[80];
7594
7595 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7596 eventreply->EventData;
7597
7598 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7599 &encstatus->ReasonCode);
7600 switch (rc) {
7601 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7602 (void) sprintf(string, "added");
7603 break;
7604 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7605 (void) sprintf(string, ", not responding");
7606 break;
7607 default:
7608 break;
7609 }
7610 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure "
7611 "%x%s\n", mpt->m_instance,
7612 ddi_get16(mpt->m_acc_reply_frame_hdl,
7613 &encstatus->EnclosureHandle), string));
7614 break;
7615 }
7616
7617 /*
7618 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7619 * mptsas_handle_event_sync,in here just send ack message.
7620 */
7621 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7622 {
7623 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7624 uint8_t rc;
7625 uint16_t devhdl;
7626 uint64_t wwn = 0;
7627 uint32_t wwn_lo, wwn_hi;
7628
7629 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7630 eventreply->EventData;
7631 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7632 &statuschange->ReasonCode);
7633 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7634 (uint32_t *)(void *)&statuschange->SASAddress);
7635 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7636 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7637 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7638 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7639 &statuschange->DevHandle);
7640
7641 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7642 wwn));
7643
7644 switch (rc) {
7645 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7646 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7647 ddi_get8(mpt->m_acc_reply_frame_hdl,
7648 &statuschange->ASC),
7649 ddi_get8(mpt->m_acc_reply_frame_hdl,
7650 &statuschange->ASCQ)));
7651 break;
7652
7653 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7654 NDBG20(("Device not supported"));
7655 break;
7656
7657 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7658 NDBG20(("IOC internally generated the Target Reset "
7659 "for devhdl:%x", devhdl));
7660 break;
7661
7662 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7663 NDBG20(("IOC's internally generated Target Reset "
7664 "completed for devhdl:%x", devhdl));
7665 break;
7666
7667 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7668 NDBG20(("IOC internally generated Abort Task"));
7669 break;
7670
7671 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7672 NDBG20(("IOC's internally generated Abort Task "
7673 "completed"));
7674 break;
7675
7676 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7677 NDBG20(("IOC internally generated Abort Task Set"));
7678 break;
7679
7680 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7681 NDBG20(("IOC internally generated Clear Task Set"));
7682 break;
7683
7684 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7685 NDBG20(("IOC internally generated Query Task"));
7686 break;
7687
7688 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7689 NDBG20(("Device sent an Asynchronous Notification"));
7690 break;
7691
7692 default:
7693 break;
7694 }
7695 break;
7696 }
7697 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7698 {
7699 /*
7700 * IR TOPOLOGY CHANGE LIST Event has already been handled
7701 * in mpt_handle_event_sync() of interrupt context
7702 */
7703 break;
7704 }
7705 case MPI2_EVENT_IR_OPERATION_STATUS:
7706 {
7707 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7708 char reason_str[80];
7709 uint8_t rc, percent;
7710 uint16_t handle;
7711
7712 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7713 eventreply->EventData;
7714 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7715 &irOpStatus->RAIDOperation);
7716 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7717 &irOpStatus->PercentComplete);
7718 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7719 &irOpStatus->VolDevHandle);
7720
7721 switch (rc) {
7722 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7723 (void) sprintf(reason_str, "resync");
7724 break;
7725 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7726 (void) sprintf(reason_str, "online capacity "
7727 "expansion");
7728 break;
7729 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7730 (void) sprintf(reason_str, "consistency check");
7731 break;
7732 default:
7733 (void) sprintf(reason_str, "unknown reason %x",
7734 rc);
7735 }
7736
7737 NDBG20(("mptsas%d raid operational status: (%s)"
7738 "\thandle(0x%04x), percent complete(%d)\n",
7739 mpt->m_instance, reason_str, handle, percent));
7740 break;
7741 }
7742 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7743 {
7744 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7745 uint8_t phy_num;
7746 uint8_t primitive;
7747
7748 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7749 eventreply->EventData;
7750
7751 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7752 &sas_broadcast->PhyNum);
7753 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7754 &sas_broadcast->Primitive);
7755
7756 switch (primitive) {
7757 case MPI2_EVENT_PRIMITIVE_CHANGE:
7758 mptsas_smhba_log_sysevent(mpt,
7759 ESC_SAS_HBA_PORT_BROADCAST,
7760 SAS_PORT_BROADCAST_CHANGE,
7761 &mpt->m_phy_info[phy_num].smhba_info);
7762 break;
7763 case MPI2_EVENT_PRIMITIVE_SES:
7764 mptsas_smhba_log_sysevent(mpt,
7765 ESC_SAS_HBA_PORT_BROADCAST,
7766 SAS_PORT_BROADCAST_SES,
7767 &mpt->m_phy_info[phy_num].smhba_info);
7768 break;
7769 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7770 mptsas_smhba_log_sysevent(mpt,
7771 ESC_SAS_HBA_PORT_BROADCAST,
7772 SAS_PORT_BROADCAST_D01_4,
7773 &mpt->m_phy_info[phy_num].smhba_info);
7774 break;
7775 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7776 mptsas_smhba_log_sysevent(mpt,
7777 ESC_SAS_HBA_PORT_BROADCAST,
7778 SAS_PORT_BROADCAST_D04_7,
7779 &mpt->m_phy_info[phy_num].smhba_info);
7780 break;
7781 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7782 mptsas_smhba_log_sysevent(mpt,
7783 ESC_SAS_HBA_PORT_BROADCAST,
7784 SAS_PORT_BROADCAST_D16_7,
7785 &mpt->m_phy_info[phy_num].smhba_info);
7786 break;
7787 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7788 mptsas_smhba_log_sysevent(mpt,
7789 ESC_SAS_HBA_PORT_BROADCAST,
7790 SAS_PORT_BROADCAST_D29_7,
7791 &mpt->m_phy_info[phy_num].smhba_info);
7792 break;
7793 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7794 mptsas_smhba_log_sysevent(mpt,
7795 ESC_SAS_HBA_PORT_BROADCAST,
7796 SAS_PORT_BROADCAST_D24_0,
7797 &mpt->m_phy_info[phy_num].smhba_info);
7798 break;
7799 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7800 mptsas_smhba_log_sysevent(mpt,
7801 ESC_SAS_HBA_PORT_BROADCAST,
7802 SAS_PORT_BROADCAST_D27_4,
7803 &mpt->m_phy_info[phy_num].smhba_info);
7804 break;
7805 default:
7806 NDBG16(("mptsas%d: unknown BROADCAST PRIMITIVE"
7807 " %x received",
7808 mpt->m_instance, primitive));
7809 break;
7810 }
7811 NDBG16(("mptsas%d sas broadcast primitive: "
7812 "\tprimitive(0x%04x), phy(%d) complete\n",
7813 mpt->m_instance, primitive, phy_num));
7814 break;
7815 }
7816 case MPI2_EVENT_IR_VOLUME:
7817 {
7818 Mpi2EventDataIrVolume_t *irVolume;
7819 uint16_t devhandle;
7820 uint32_t state;
7821 int config, vol;
7822 uint8_t found = FALSE;
7823
7824 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7825 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7826 &irVolume->NewValue);
7827 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7828 &irVolume->VolDevHandle);
7829
7830 NDBG20(("EVENT_IR_VOLUME event is received"));
7831
7832 /*
7833 * Get latest RAID info and then find the DevHandle for this
7834 * event in the configuration. If the DevHandle is not found
7835 * just exit the event.
7836 */
7837 (void) mptsas_get_raid_info(mpt);
7838 for (config = 0; (config < mpt->m_num_raid_configs) &&
7839 (!found); config++) {
7840 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7841 if (mpt->m_raidconfig[config].m_raidvol[vol].
7842 m_raidhandle == devhandle) {
7843 found = TRUE;
7844 break;
7845 }
7846 }
7847 }
7848 if (!found) {
7849 break;
7850 }
7851
7852 switch (irVolume->ReasonCode) {
7853 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7854 {
7855 uint32_t i;
7856 mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
7857 state;
7858
7859 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7860 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7861 ", auto-config of hot-swap drives is %s"
7862 ", write caching is %s"
7863 ", hot-spare pool mask is %02x\n",
7864 vol, state &
7865 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7866 ? "disabled" : "enabled",
7867 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7868 ? "controlled by member disks" :
7869 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7870 ? "disabled" :
7871 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7872 ? "enabled" :
7873 "incorrectly set",
7874 (state >> 16) & 0xff);
7875 break;
7876 }
7877 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7878 {
7879 mpt->m_raidconfig[config].m_raidvol[vol].m_state =
7880 (uint8_t)state;
7881
7882 mptsas_log(mpt, CE_NOTE,
7883 "Volume %d is now %s\n", vol,
7884 state == MPI2_RAID_VOL_STATE_OPTIMAL
7885 ? "optimal" :
7886 state == MPI2_RAID_VOL_STATE_DEGRADED
7887 ? "degraded" :
7888 state == MPI2_RAID_VOL_STATE_ONLINE
7889 ? "online" :
7890 state == MPI2_RAID_VOL_STATE_INITIALIZING
7891 ? "initializing" :
7892 state == MPI2_RAID_VOL_STATE_FAILED
7893 ? "failed" :
7894 state == MPI2_RAID_VOL_STATE_MISSING
7895 ? "missing" :
7896 "state unknown");
7897 break;
7898 }
7899 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7900 {
7901 mpt->m_raidconfig[config].m_raidvol[vol].
7902 m_statusflags = state;
7903
7904 mptsas_log(mpt, CE_NOTE,
7905 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7906 vol,
7907 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7908 ? ", enabled" : ", disabled",
7909 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7910 ? ", quiesced" : "",
7911 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7912 ? ", inactive" : ", active",
7913 state &
7914 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7915 ? ", bad block table is full" : "",
7916 state &
7917 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7918 ? ", resync in progress" : "",
7919 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7920 ? ", background initialization in progress" : "",
7921 state &
7922 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7923 ? ", capacity expansion in progress" : "",
7924 state &
7925 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7926 ? ", consistency check in progress" : "",
7927 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7928 ? ", data scrub in progress" : "");
7929 break;
7930 }
7931 default:
7932 break;
7933 }
7934 break;
7935 }
7936 case MPI2_EVENT_IR_PHYSICAL_DISK:
7937 {
7938 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7939 uint16_t devhandle, enchandle, slot;
7940 uint32_t status, state;
7941 uint8_t physdisknum, reason;
7942
7943 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7944 eventreply->EventData;
7945 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7946 &irPhysDisk->PhysDiskNum);
7947 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7948 &irPhysDisk->PhysDiskDevHandle);
7949 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7950 &irPhysDisk->EnclosureHandle);
7951 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7952 &irPhysDisk->Slot);
7953 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7954 &irPhysDisk->NewValue);
7955 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7956 &irPhysDisk->ReasonCode);
7957
7958 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7959
7960 switch (reason) {
7961 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7962 mptsas_log(mpt, CE_NOTE,
7963 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7964 "for enclosure with handle 0x%x is now in hot "
7965 "spare pool %d",
7966 physdisknum, devhandle, slot, enchandle,
7967 (state >> 16) & 0xff);
7968 break;
7969
7970 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7971 status = state;
7972 mptsas_log(mpt, CE_NOTE,
7973 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7974 "for enclosure with handle 0x%x is now "
7975 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7976 enchandle,
7977 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7978 ? ", inactive" : ", active",
7979 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7980 ? ", out of sync" : "",
7981 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7982 ? ", quiesced" : "",
7983 status &
7984 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7985 ? ", write cache enabled" : "",
7986 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7987 ? ", capacity expansion target" : "");
7988 break;
7989
7990 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7991 mptsas_log(mpt, CE_NOTE,
7992 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7993 "for enclosure with handle 0x%x is now %s\n",
7994 physdisknum, devhandle, slot, enchandle,
7995 state == MPI2_RAID_PD_STATE_OPTIMAL
7996 ? "optimal" :
7997 state == MPI2_RAID_PD_STATE_REBUILDING
7998 ? "rebuilding" :
7999 state == MPI2_RAID_PD_STATE_DEGRADED
8000 ? "degraded" :
8001 state == MPI2_RAID_PD_STATE_HOT_SPARE
8002 ? "a hot spare" :
8003 state == MPI2_RAID_PD_STATE_ONLINE
8004 ? "online" :
8005 state == MPI2_RAID_PD_STATE_OFFLINE
8006 ? "offline" :
8007 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
8008 ? "not compatible" :
8009 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
8010 ? "not configured" :
8011 "state unknown");
8012 break;
8013 }
8014 break;
8015 }
8016 default:
8017 NDBG20(("mptsas%d: unknown event %x received",
8018 mpt->m_instance, event));
8019 break;
8020 }
8021
8022 /*
8023 * Return the reply frame to the free queue.
8024 */
8025 ddi_put32(mpt->m_acc_free_queue_hdl,
8026 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
8027 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
8028 DDI_DMA_SYNC_FORDEV);
8029 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
8030 mpt->m_free_index = 0;
8031 }
8032 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
8033 mpt->m_free_index);
8034 mutex_exit(&mpt->m_mutex);
8035 }
8036
8037 /*
8038 * invoked from timeout() to restart qfull cmds with throttle == 0
8039 */
8040 static void
8041 mptsas_restart_cmd(void *arg)
8042 {
8043 mptsas_t *mpt = arg;
8044 mptsas_target_t *ptgt = NULL;
8045
8046 mutex_enter(&mpt->m_mutex);
8047
8048 mpt->m_restart_cmd_timeid = 0;
8049
8050 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8051 ptgt = refhash_next(mpt->m_targets, ptgt)) {
8052 if (ptgt->m_reset_delay == 0) {
8053 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
8054 mptsas_set_throttle(mpt, ptgt,
8055 MAX_THROTTLE);
8056 }
8057 }
8058 }
8059 mptsas_restart_hba(mpt);
8060 mutex_exit(&mpt->m_mutex);
8061 }
8062
8063 void
8064 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8065 {
8066 int slot;
8067 mptsas_slots_t *slots = mpt->m_active;
8068 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8069
8070 ASSERT(cmd != NULL);
8071 ASSERT(cmd->cmd_queued == FALSE);
8072
8073 /*
8074 * Task Management cmds are removed in their own routines. Also,
8075 * we don't want to modify timeout based on TM cmds.
8076 */
8077 if (cmd->cmd_flags & CFLAG_TM_CMD) {
8078 return;
8079 }
8080
8081 slot = cmd->cmd_slot;
8082
8083 /*
8084 * remove the cmd.
8085 */
8086 if (cmd == slots->m_slot[slot]) {
8087 NDBG31(("mptsas_remove_cmd: removing cmd=0x%p, flags "
8088 "0x%x", (void *)cmd, cmd->cmd_flags));
8089 slots->m_slot[slot] = NULL;
8090 mpt->m_ncmds--;
8091
8092 /*
8093 * only decrement per target ncmds if command
8094 * has a target associated with it.
8095 */
8096 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
8097 ptgt->m_t_ncmds--;
8098 /*
8099 * reset throttle if we just ran an untagged command
8100 * to a tagged target
8101 */
8102 if ((ptgt->m_t_ncmds == 0) &&
8103 ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
8104 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8105 }
8106
8107 /*
8108 * Remove this command from the active queue.
8109 */
8110 if (cmd->cmd_active_expiration != 0) {
8111 TAILQ_REMOVE(&ptgt->m_active_cmdq, cmd,
8112 cmd_active_link);
8113 cmd->cmd_active_expiration = 0;
8114 }
8115 }
8116 }
8117
8118 /*
8119 * This is all we need to do for ioc commands.
8120 */
8121 if (cmd->cmd_flags & CFLAG_CMDIOC) {
8122 mptsas_return_to_pool(mpt, cmd);
8123 return;
8124 }
8125
8126 ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
8127 }
8128
8129 /*
8130 * accept all cmds on the tx_waitq if any and then
8131 * start a fresh request from the top of the device queue.
8132 *
8133 * since there are always cmds queued on the tx_waitq, and rare cmds on
8134 * the instance waitq, so this function should not be invoked in the ISR,
8135 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
8136 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
8137 */
8138 static void
8139 mptsas_restart_hba(mptsas_t *mpt)
8140 {
8141 ASSERT(mutex_owned(&mpt->m_mutex));
8142
8143 mutex_enter(&mpt->m_tx_waitq_mutex);
8144 if (mpt->m_tx_waitq) {
8145 mptsas_accept_tx_waitq(mpt);
8146 }
8147 mutex_exit(&mpt->m_tx_waitq_mutex);
8148 mptsas_restart_waitq(mpt);
8149 }
8150
8151 /*
8152 * start a fresh request from the top of the device queue
8153 */
8154 static void
8155 mptsas_restart_waitq(mptsas_t *mpt)
8156 {
8157 mptsas_cmd_t *cmd, *next_cmd;
8158 mptsas_target_t *ptgt = NULL;
8159
8160 NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
8161
8162 ASSERT(mutex_owned(&mpt->m_mutex));
8163
8164 /*
8165 * If there is a reset delay, don't start any cmds. Otherwise, start
8166 * as many cmds as possible.
8167 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
8168 * commands is m_max_requests - 2.
8169 */
8170 cmd = mpt->m_waitq;
8171
8172 while (cmd != NULL) {
8173 next_cmd = cmd->cmd_linkp;
8174 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
8175 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8176 /*
8177 * passthru command get slot need
8178 * set CFLAG_PREPARED.
8179 */
8180 cmd->cmd_flags |= CFLAG_PREPARED;
8181 mptsas_waitq_delete(mpt, cmd);
8182 mptsas_start_passthru(mpt, cmd);
8183 }
8184 cmd = next_cmd;
8185 continue;
8186 }
8187 if (cmd->cmd_flags & CFLAG_CONFIG) {
8188 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8189 /*
8190 * Send the config page request and delete it
8191 * from the waitq.
8192 */
8193 cmd->cmd_flags |= CFLAG_PREPARED;
8194 mptsas_waitq_delete(mpt, cmd);
8195 mptsas_start_config_page_access(mpt, cmd);
8196 }
8197 cmd = next_cmd;
8198 continue;
8199 }
8200 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
8201 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8202 /*
8203 * Send the FW Diag request and delete if from
8204 * the waitq.
8205 */
8206 cmd->cmd_flags |= CFLAG_PREPARED;
8207 mptsas_waitq_delete(mpt, cmd);
8208 mptsas_start_diag(mpt, cmd);
8209 }
8210 cmd = next_cmd;
8211 continue;
8212 }
8213
8214 ptgt = cmd->cmd_tgt_addr;
8215 if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
8216 (ptgt->m_t_ncmds == 0)) {
8217 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8218 }
8219 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
8220 (ptgt && (ptgt->m_reset_delay == 0)) &&
8221 (ptgt && (ptgt->m_t_ncmds <
8222 ptgt->m_t_throttle))) {
8223 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8224 mptsas_waitq_delete(mpt, cmd);
8225 (void) mptsas_start_cmd(mpt, cmd);
8226 }
8227 }
8228 cmd = next_cmd;
8229 }
8230 }
8231 /*
8232 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
8233 * Accept all those queued cmds before new cmd is accept so that the
8234 * cmds are sent in order.
8235 */
8236 static void
8237 mptsas_accept_tx_waitq(mptsas_t *mpt)
8238 {
8239 mptsas_cmd_t *cmd;
8240
8241 ASSERT(mutex_owned(&mpt->m_mutex));
8242 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
8243
8244 /*
8245 * A Bus Reset could occur at any time and flush the tx_waitq,
8246 * so we cannot count on the tx_waitq to contain even one cmd.
8247 * And when the m_tx_waitq_mutex is released and run
8248 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8249 */
8250 cmd = mpt->m_tx_waitq;
8251 for (;;) {
8252 if ((cmd = mpt->m_tx_waitq) == NULL) {
8253 mpt->m_tx_draining = 0;
8254 break;
8255 }
8256 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8257 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8258 }
8259 cmd->cmd_linkp = NULL;
8260 mutex_exit(&mpt->m_tx_waitq_mutex);
8261 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8262 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8263 "to accept cmd on queue\n");
8264 mutex_enter(&mpt->m_tx_waitq_mutex);
8265 }
8266 }
8267
8268
8269 /*
8270 * mpt tag type lookup
8271 */
8272 static char mptsas_tag_lookup[] =
8273 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8274
/*
 * mptsas_start_cmd - build a SCSI IO request in the command's request
 * frame and post it to the controller.
 *
 * Returns DDI_SUCCESS when the request was posted, DDI_FAILURE when the
 * command was requeued to drain untagged I/O or when an FM handle check
 * fails after posting.  On success the command is also inserted into
 * the target's active command queue, which is kept sorted by expiration
 * time (latest expiration at the head).
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
    struct scsi_pkt *pkt = CMD2PKT(cmd);
    uint32_t control = 0;
    caddr_t mem;
    pMpi2SCSIIORequest_t io_request;
    ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
    ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
    mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
    uint16_t SMID, io_flags = 0;
    uint32_t request_desc_low, request_desc_high;
    mptsas_cmd_t *c;

    NDBG1(("mptsas_start_cmd: cmd=0x%p, flags 0x%x", (void *)cmd,
        cmd->cmd_flags));

    /*
     * The SMID is simply the command's pre-assigned slot number
     * (SMID 0 is invalid, so slot numbering starts at 1).
     */
    SMID = cmd->cmd_slot;

    /*
     * It is possible for back to back device reset to
     * happen before the reset delay has expired.  That's
     * ok, just let the device reset go out on the bus.
     */
    if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
        ASSERT(ptgt->m_reset_delay == 0);
    }

    /*
     * if a non-tagged cmd is submitted to an active tagged target
     * then drain before submitting this cmd; SCSI-2 allows RQSENSE
     * to be untagged
     */
    if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
        (ptgt->m_t_ncmds > 1) &&
        ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
        (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
        if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
            NDBG23(("target=%d, untagged cmd, start draining\n",
                ptgt->m_devhdl));

            if (ptgt->m_reset_delay == 0) {
                mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
            }

            /* Requeue at the head so it runs once drained. */
            mptsas_remove_cmd(mpt, cmd);
            cmd->cmd_pkt_flags |= FLAG_HEAD;
            mptsas_waitq_add(mpt, cmd);
        }
        return (DDI_FAILURE);
    }

    /*
     * Set correct tag bits.
     */
    if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
        switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
            FLAG_TAGMASK) >> 12)]) {
        case MSG_SIMPLE_QTAG:
            control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
            break;
        case MSG_HEAD_QTAG:
            control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
            break;
        case MSG_ORDERED_QTAG:
            control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
            break;
        default:
            mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
            break;
        }
    } else {
        /*
         * Untagged (non-RQSENSE) command: clamp the target to one
         * outstanding command.  NOTE(review): this writes
         * m_t_throttle directly rather than going through
         * mptsas_set_throttle() -- confirm that is intentional.
         */
        if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
            ptgt->m_t_throttle = 1;
        }
        control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
    }

    if (cmd->cmd_pkt_flags & FLAG_TLR) {
        control |= MPI2_SCSIIO_CONTROL_TLR_ON;
    }

    /* Locate this SMID's request frame and build the SCSI IO request. */
    mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
    io_request = (pMpi2SCSIIORequest_t)mem;

    bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
    /* SGLOffset0 is expressed in 32-bit words. */
    ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
        (MPI2_SCSI_IO_REQUEST, SGL) / 4);
    mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
        MPI2_FUNCTION_SCSI_IO_REQUEST);

    /* Copy the CDB into the request frame. */
    (void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
        io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

    io_flags = cmd->cmd_cdblen;
    /* Use the MPI 2.5 fast-path descriptor when the target supports it. */
    if (mptsas_use_fastpath &&
        ptgt->m_io_flags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
        io_flags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
        request_desc_low = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
    } else {
        request_desc_low = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
    }
    ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
    /*
     * setup the Scatter/Gather DMA list for this request
     */
    if (cmd->cmd_cookiec > 0) {
        mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
    } else {
        /* No data: emit a single zero-length simple SGL element. */
        ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
            ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
            MPI2_SGE_FLAGS_END_OF_BUFFER |
            MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
    }

    /*
     * save ARQ information
     */
    ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
    if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
        (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
        ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
            cmd->cmd_ext_arqcookie.dmac_address);
    } else {
        ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
            cmd->cmd_arqcookie.dmac_address);
    }

    ddi_put32(acc_hdl, &io_request->Control, control);

    NDBG31(("starting message=%d(0x%p), with cmd=0x%p",
        SMID, (void *)io_request, (void *)cmd));

    /* Flush the request frame to the device before posting it. */
    (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

    /*
     * Build request descriptor and write it to the request desc post reg.
     */
    request_desc_low |= (SMID << 16);
    request_desc_high = ptgt->m_devhdl << 16;
    MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);

    /*
     * Start timeout.
     */
    cmd->cmd_active_expiration =
        gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
#ifdef MPTSAS_TEST
    /*
     * Force timeouts to happen immediately.
     */
    if (mptsas_test_timeouts)
        cmd->cmd_active_expiration = gethrtime();
#endif
    /*
     * Insert into the per-target active queue, which is ordered by
     * expiration time with the latest-expiring command at the head.
     */
    c = TAILQ_FIRST(&ptgt->m_active_cmdq);
    if (c == NULL ||
        c->cmd_active_expiration < cmd->cmd_active_expiration) {
        /*
         * Common case is that this is the last pending expiration
         * (or queue is empty).  Insert at head of the queue.
         */
        TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
    } else {
        /*
         * Queue is not empty and first element expires later than
         * this command.  Search for element expiring sooner.
         */
        while ((c = TAILQ_NEXT(c, cmd_active_link)) != NULL) {
            if (c->cmd_active_expiration <
                cmd->cmd_active_expiration) {
                TAILQ_INSERT_BEFORE(c, cmd, cmd_active_link);
                break;
            }
        }
        if (c == NULL) {
            /*
             * No element found expiring sooner, append to
             * non-empty queue.
             */
            TAILQ_INSERT_TAIL(&ptgt->m_active_cmdq, cmd,
                cmd_active_link);
        }
    }

    /* FMA: verify the DMA and access handles after touching hardware. */
    if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
        (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
        ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
        return (DDI_FAILURE);
    }
    return (DDI_SUCCESS);
}
8471
8472 /*
8473 * Select a helper thread to handle current doneq
8474 */
8475 static void
8476 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8477 {
8478 uint64_t t, i;
8479 uint32_t min = 0xffffffff;
8480 mptsas_doneq_thread_list_t *item;
8481
8482 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8483 item = &mpt->m_doneq_thread_id[i];
8484 /*
8485 * If the completed command on help thread[i] less than
8486 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8487 * pick a thread which has least completed command.
8488 */
8489
8490 mutex_enter(&item->mutex);
8491 if (item->len < mpt->m_doneq_thread_threshold) {
8492 t = i;
8493 mutex_exit(&item->mutex);
8494 break;
8495 }
8496 if (item->len < min) {
8497 min = item->len;
8498 t = i;
8499 }
8500 mutex_exit(&item->mutex);
8501 }
8502 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8503 mptsas_doneq_mv(mpt, t);
8504 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8505 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8506 }
8507
8508 /*
 * move the current global doneq to the doneq of thread[t]
8510 */
8511 static void
8512 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8513 {
8514 mptsas_cmd_t *cmd;
8515 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8516
8517 ASSERT(mutex_owned(&item->mutex));
8518 while ((cmd = mpt->m_doneq) != NULL) {
8519 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8520 mpt->m_donetail = &mpt->m_doneq;
8521 }
8522 cmd->cmd_linkp = NULL;
8523 *item->donetail = cmd;
8524 item->donetail = &cmd->cmd_linkp;
8525 mpt->m_doneq_len--;
8526 item->len++;
8527 }
8528 }
8529
/*
 * Check every access and DMA handle associated with a completing command
 * for faults.  On any fault, report DDI_SERVICE_UNAFFECTED to the FMA
 * framework and mark the packet CMD_TRAN_ERR (statistics cleared) so the
 * target driver can retry the command.
 */
void
mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	/* Check all acc and dma handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		/* Only the config-space handle has its error state cleared. */
		ddi_fm_acc_err_clear(mpt->m_config_handle,
		    DDI_FME_VER0);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Shared (per-instance) DMA handles. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Per-command data DMA handle, if the command has one. */
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Extra SGL frame handles, allocated only for large transfers. */
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Auto-request-sense DMA handle. */
	if (cmd->cmd_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Extended (larger) ARQ DMA handle. */
	if (cmd->cmd_ext_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
}
8600
8601 /*
8602 * These routines manipulate the queue of commands that
8603 * are waiting for their completion routines to be called.
8604 * The queue is usually in FIFO order but on an MP system
8605 * it's possible for the completion routines to get out
8606 * of order. If that's a problem you need to add a global
8607 * mutex around the code that calls the completion routine
8608 * in the interrupt handler.
8609 */
8610 static void
8611 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8612 {
8613 struct scsi_pkt *pkt = CMD2PKT(cmd);
8614
8615 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8616
8617 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8618 cmd->cmd_linkp = NULL;
8619 cmd->cmd_flags |= CFLAG_FINISHED;
8620 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8621
8622 mptsas_fma_check(mpt, cmd);
8623
8624 /*
8625 * only add scsi pkts that have completion routines to
8626 * the doneq. no intr cmds do not have callbacks.
8627 */
8628 if (pkt && (pkt->pkt_comp)) {
8629 *mpt->m_donetail = cmd;
8630 mpt->m_donetail = &cmd->cmd_linkp;
8631 mpt->m_doneq_len++;
8632 }
8633 }
8634
8635 static mptsas_cmd_t *
8636 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8637 {
8638 mptsas_cmd_t *cmd;
8639 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8640
8641 /* pop one off the done queue */
8642 if ((cmd = item->doneq) != NULL) {
8643 /* if the queue is now empty fix the tail pointer */
8644 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8645 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8646 item->donetail = &item->doneq;
8647 }
8648 cmd->cmd_linkp = NULL;
8649 item->len--;
8650 }
8651 return (cmd);
8652 }
8653
/*
 * Run the completion callbacks for every command on the instance doneq.
 * The queue is detached in one step and walked with m_mutex dropped, since
 * completion routines may call back into the driver; m_in_callback prevents
 * re-entering this loop from such a callback.  Called and returns with
 * m_mutex held.
 */
static void
mptsas_doneq_empty(mptsas_t *mpt)
{
	if (mpt->m_doneq && !mpt->m_in_callback) {
		mptsas_cmd_t	*cmd, *next;
		struct scsi_pkt *pkt;

		mpt->m_in_callback = 1;
		/* Detach the whole queue so it can be walked unlocked. */
		cmd = mpt->m_doneq;
		mpt->m_doneq = NULL;
		mpt->m_donetail = &mpt->m_doneq;
		mpt->m_doneq_len = 0;

		mutex_exit(&mpt->m_mutex);
		/*
		 * run the completion routines of all the
		 * completed commands
		 */
		while (cmd != NULL) {
			next = cmd->cmd_linkp;
			cmd->cmd_linkp = NULL;
			/* run this command's completion routine */
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
			mptsas_pkt_comp(pkt, cmd);
			cmd = next;
		}
		mutex_enter(&mpt->m_mutex);
		mpt->m_in_callback = 0;
	}
}
8685
8686 /*
8687 * These routines manipulate the target's queue of pending requests
8688 */
8689 void
8690 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8691 {
8692 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8693 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8694 cmd->cmd_queued = TRUE;
8695 if (ptgt)
8696 ptgt->m_t_nwait++;
8697 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8698 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8699 mpt->m_waitqtail = &cmd->cmd_linkp;
8700 }
8701 mpt->m_waitq = cmd;
8702 } else {
8703 cmd->cmd_linkp = NULL;
8704 *(mpt->m_waitqtail) = cmd;
8705 mpt->m_waitqtail = &cmd->cmd_linkp;
8706 }
8707 }
8708
8709 static mptsas_cmd_t *
8710 mptsas_waitq_rm(mptsas_t *mpt)
8711 {
8712 mptsas_cmd_t *cmd;
8713 mptsas_target_t *ptgt;
8714 NDBG7(("mptsas_waitq_rm"));
8715
8716 MPTSAS_WAITQ_RM(mpt, cmd);
8717
8718 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8719 if (cmd) {
8720 ptgt = cmd->cmd_tgt_addr;
8721 if (ptgt) {
8722 ptgt->m_t_nwait--;
8723 ASSERT(ptgt->m_t_nwait >= 0);
8724 }
8725 }
8726 return (cmd);
8727 }
8728
8729 /*
8730 * remove specified cmd from the middle of the wait queue.
8731 */
8732 static void
8733 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8734 {
8735 mptsas_cmd_t *prevp = mpt->m_waitq;
8736 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8737
8738 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8739 (void *)mpt, (void *)cmd));
8740 if (ptgt) {
8741 ptgt->m_t_nwait--;
8742 ASSERT(ptgt->m_t_nwait >= 0);
8743 }
8744
8745 if (prevp == cmd) {
8746 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8747 mpt->m_waitqtail = &mpt->m_waitq;
8748
8749 cmd->cmd_linkp = NULL;
8750 cmd->cmd_queued = FALSE;
8751 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8752 (void *)mpt, (void *)cmd));
8753 return;
8754 }
8755
8756 while (prevp != NULL) {
8757 if (prevp->cmd_linkp == cmd) {
8758 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8759 mpt->m_waitqtail = &prevp->cmd_linkp;
8760
8761 cmd->cmd_linkp = NULL;
8762 cmd->cmd_queued = FALSE;
8763 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8764 (void *)mpt, (void *)cmd));
8765 return;
8766 }
8767 prevp = prevp->cmd_linkp;
8768 }
8769 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8770 }
8771
8772 static mptsas_cmd_t *
8773 mptsas_tx_waitq_rm(mptsas_t *mpt)
8774 {
8775 mptsas_cmd_t *cmd;
8776 NDBG7(("mptsas_tx_waitq_rm"));
8777
8778 MPTSAS_TX_WAITQ_RM(mpt, cmd);
8779
8780 NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
8781
8782 return (cmd);
8783 }
8784
8785 /*
8786 * remove specified cmd from the middle of the tx_waitq.
8787 */
8788 static void
8789 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8790 {
8791 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8792
8793 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8794 (void *)mpt, (void *)cmd));
8795
8796 if (prevp == cmd) {
8797 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8798 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8799
8800 cmd->cmd_linkp = NULL;
8801 cmd->cmd_queued = FALSE;
8802 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8803 (void *)mpt, (void *)cmd));
8804 return;
8805 }
8806
8807 while (prevp != NULL) {
8808 if (prevp->cmd_linkp == cmd) {
8809 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8810 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8811
8812 cmd->cmd_linkp = NULL;
8813 cmd->cmd_queued = FALSE;
8814 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8815 (void *)mpt, (void *)cmd));
8816 return;
8817 }
8818 prevp = prevp->cmd_linkp;
8819 }
8820 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8821 }
8822
8823 /*
8824 * device and bus reset handling
8825 *
8826 * Notes:
8827 * - RESET_ALL: reset the controller
8828 * - RESET_TARGET: reset the target specified in scsi_address
8829 */
/*
 * tran_reset(9E) entry point.  Resets the target behind ap (level is
 * logged but the reset issued is always a per-target reset here) and
 * returns TRUE on success, FALSE on failure.
 */
static int
mptsas_scsi_reset(struct scsi_address *ap, int level)
{
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	mptsas_target_t		*ptgt = NULL;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
	ptgt = tgt_private->t_private;
	if (ptgt == NULL) {
		return (FALSE);
	}
	NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
	    level));

	mutex_enter(&mpt->m_mutex);
	/*
	 * if we are not in panic set up a reset delay for this target;
	 * at panic time timeouts don't run, so just busy-wait instead
	 */
	if (!ddi_in_panic()) {
		mptsas_setup_bus_reset_delay(mpt);
	} else {
		drv_usecwait(mpt->m_scsi_reset_delay * 1000);
	}
	rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
	mutex_exit(&mpt->m_mutex);

	/*
	 * The transport layer expects to only see TRUE and
	 * FALSE. Therefore, we will adjust the return value
	 * if mptsas_do_scsi_reset returns FAILED.
	 */
	if (rval == FAILED)
		rval = FALSE;
	return (rval);
}
8867
/*
 * Issue a target reset to devhdl via an IOC task-management request,
 * then complete any commands that finished as a result.  Physical disks
 * that belong to a RAID volume are silently skipped (returns TRUE).
 * Called with m_mutex held.
 */
static int
mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
{
	int		rval = FALSE;
	uint8_t		config, disk;

	ASSERT(mutex_owned(&mpt->m_mutex));

	if (mptsas_debug_resets) {
		mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
		    devhdl);
	}

	/*
	 * Issue a Target Reset message to the target specified but not to a
	 * disk making up a raid volume. Just look through the RAID config
	 * Phys Disk list of DevHandles. If the target's DevHandle is in this
	 * list, then don't reset this target.
	 */
	for (config = 0; config < mpt->m_num_raid_configs; config++) {
		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
			if (devhdl == mpt->m_raidconfig[config].
			    m_physdisk_devhdl[disk]) {
				return (TRUE);
			}
		}
	}

	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);

	/* Run completion callbacks for anything flushed by the reset. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
8902
/*
 * tran_reset_notify(9E) entry point: register or cancel a caller-supplied
 * reset-notification callback via the common scsi_hba helper.
 */
static int
mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));

	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &mpt->m_mutex, &mpt->m_reset_notify_listf));
}
8914
8915 static int
8916 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8917 {
8918 dev_info_t *lun_dip = NULL;
8919
8920 ASSERT(sd != NULL);
8921 ASSERT(name != NULL);
8922 lun_dip = sd->sd_dev;
8923 ASSERT(lun_dip != NULL);
8924
8925 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8926 return (1);
8927 } else {
8928 return (0);
8929 }
8930 }
8931
/*
 * tran_get_bus_addr(9E): the bus address is the same string as the
 * device name, so simply delegate to mptsas_get_name().
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
8937
8938 void
8939 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
8940 {
8941
8942 NDBG25(("mptsas_set_throttle: throttle=%x", what));
8943
8944 /*
8945 * if the bus is draining/quiesced, no changes to the throttles
8946 * are allowed. Not allowing change of throttles during draining
8947 * limits error recovery but will reduce draining time
8948 *
8949 * all throttles should have been set to HOLD_THROTTLE
8950 */
8951 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
8952 return;
8953 }
8954
8955 if (what == HOLD_THROTTLE) {
8956 ptgt->m_t_throttle = HOLD_THROTTLE;
8957 } else if (ptgt->m_reset_delay == 0) {
8958 ptgt->m_t_throttle = what;
8959 }
8960 }
8961
8962 /*
8963 * Clean up from a device reset.
8964 * For the case of target reset, this function clears the waitq of all
8965 * commands for a particular target. For the case of abort task set, this
 * function clears the waitq of all commands for a particular target/lun.
8967 */
static void
mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd, *next_cmd;
	int		slot;
	uchar_t		reason;
	uint_t		stat;
	hrtime_t	timestamp;

	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));

	/* Snapshot the clock once so the timeout test is consistent. */
	timestamp = gethrtime();

	/*
	 * Make sure the I/O Controller has flushed all cmds
	 * that are associated with this target for a target reset
	 * and target/lun for abort task set.
	 * Account for TM requests, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;
		reason = CMD_RESET;
		stat = STAT_DEV_RESET;
		switch (tasktype) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			/* Target reset: flush every command for the target. */
			if (Tgt(cmd) == target) {
				if (cmd->cmd_active_expiration <= timestamp) {
					/*
					 * When timeout requested, propagate
					 * proper reason and statistics to
					 * target drivers.
					 */
					reason = CMD_TIMEOUT;
					stat |= STAT_TIMEOUT;
				}
				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
			/* Abort task set reports CMD_ABORTED instead. */
			reason = CMD_ABORTED;
			stat = STAT_ABORTED;
			/*FALLTHROUGH*/
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			/* LUN-scoped: match both target and lun. */
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason,
				    stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Flush the waitq and tx_waitq of this target's cmds
	 */
	cmd = mpt->m_waitq;

	reason = CMD_RESET;
	stat = STAT_DEV_RESET;

	switch (tasktype) {
	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		while (cmd != NULL) {
			/* Save the link before the cmd is unlinked. */
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_tx_waitq_delete(mpt, cmd);
				/*
				 * Drop the tx_waitq mutex while completing;
				 * doneq_add must not be called under it.
				 */
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		reason = CMD_ABORTED;
		stat = STAT_ABORTED;
		/*FALLTHROUGH*/
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_tx_waitq_delete(mpt, cmd);
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
		    tasktype);
		break;
	}
}
9109
9110 /*
9111 * Clean up hba state, abort all outstanding command and commands in waitq
9112 * reset timeout of all targets.
9113 */
static void
mptsas_flush_hba(mptsas_t *mpt)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd;
	int		slot;

	NDBG25(("mptsas_flush_hba"));

	/*
	 * The I/O Controller should have already sent back
	 * all commands via the scsi I/O reply frame. Make
	 * sure all commands have been flushed.
	 * Account for TM request, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;

		if (cmd->cmd_flags & CFLAG_CMDIOC) {
			/*
			 * Need to make sure to tell everyone that might be
			 * waiting on this command that it's going to fail. If
			 * we get here, this command will never timeout because
			 * the active command table is going to be re-allocated,
			 * so there will be nothing to check against a time out.
			 * Instead, mark the command as failed due to reset.
			 */
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if ((cmd->cmd_flags &
			    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
				/* Wake whichever waiter owns this command. */
				cmd->cmd_flags |= CFLAG_FINISHED;
				cv_broadcast(&mpt->m_passthru_cv);
				cv_broadcast(&mpt->m_config_cv);
				cv_broadcast(&mpt->m_fw_diag_cv);
			}
			continue;
		}

		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
		    slot));
		mptsas_dump_cmd(mpt, cmd);

		mptsas_remove_cmd(mpt, cmd);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
	}

	/*
	 * Flush the waitq.
	 */
	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			/* Internal commands are woken, not queued to doneq. */
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
		} else {
			mptsas_doneq_add(mpt, cmd);
		}
	}

	/*
	 * Flush the tx_waitq
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
		/* Drop tx_waitq_mutex while completing each command. */
		mutex_exit(&mpt->m_tx_waitq_mutex);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
		mutex_enter(&mpt->m_tx_waitq_mutex);
	}
	mutex_exit(&mpt->m_tx_waitq_mutex);

	/*
	 * Drain the taskqs prior to reallocating resources.
	 */
	mutex_exit(&mpt->m_mutex);
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);
	mutex_enter(&mpt->m_mutex);
}
9200
9201 /*
9202 * set pkt_reason and OR in pkt_statistics flag
9203 */
9204 static void
9205 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9206 uint_t stat)
9207 {
9208 #ifndef __lock_lint
9209 _NOTE(ARGUNUSED(mpt))
9210 #endif
9211
9212 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9213 (void *)cmd, reason, stat));
9214
9215 if (cmd) {
9216 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9217 cmd->cmd_pkt->pkt_reason = reason;
9218 }
9219 cmd->cmd_pkt->pkt_statistics |= stat;
9220 }
9221 }
9222
9223 static void
9224 mptsas_start_watch_reset_delay()
9225 {
9226 NDBG22(("mptsas_start_watch_reset_delay"));
9227
9228 mutex_enter(&mptsas_global_mutex);
9229 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
9230 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
9231 drv_usectohz((clock_t)
9232 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
9233 ASSERT(mptsas_reset_watch != NULL);
9234 }
9235 mutex_exit(&mptsas_global_mutex);
9236 }
9237
9238 static void
9239 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
9240 {
9241 mptsas_target_t *ptgt = NULL;
9242
9243 ASSERT(MUTEX_HELD(&mpt->m_mutex));
9244
9245 NDBG22(("mptsas_setup_bus_reset_delay"));
9246 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9247 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9248 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9249 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
9250 }
9251
9252 mptsas_start_watch_reset_delay();
9253 }
9254
9255 /*
9256 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9257 * mpt instance for active reset delays
9258 */
9259 static void
9260 mptsas_watch_reset_delay(void *arg)
9261 {
9262 #ifndef __lock_lint
9263 _NOTE(ARGUNUSED(arg))
9264 #endif
9265
9266 mptsas_t *mpt;
9267 int not_done = 0;
9268
9269 NDBG22(("mptsas_watch_reset_delay"));
9270
9271 mutex_enter(&mptsas_global_mutex);
9272 mptsas_reset_watch = 0;
9273 mutex_exit(&mptsas_global_mutex);
9274 rw_enter(&mptsas_global_rwlock, RW_READER);
9275 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9276 if (mpt->m_tran == 0) {
9277 continue;
9278 }
9279 mutex_enter(&mpt->m_mutex);
9280 not_done += mptsas_watch_reset_delay_subr(mpt);
9281 mutex_exit(&mpt->m_mutex);
9282 }
9283 rw_exit(&mptsas_global_rwlock);
9284
9285 if (not_done) {
9286 mptsas_start_watch_reset_delay();
9287 }
9288 }
9289
/*
 * Decrement the reset delay of every target on one instance.  When a
 * target's delay expires its throttle is restored and the HBA restarted;
 * returns non-zero (-1) if any target still has a delay pending so the
 * caller re-arms the watch timeout.  Called with m_mutex held.
 */
static int
mptsas_watch_reset_delay_subr(mptsas_t *mpt)
{
	int		done = 0;
	int		restart = 0;
	mptsas_target_t	*ptgt = NULL;

	NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		if (ptgt->m_reset_delay != 0) {
			ptgt->m_reset_delay -=
			    MPTSAS_WATCH_RESET_DELAY_TICK;
			/*
			 * NOTE(review): the <= 0 test suggests m_reset_delay
			 * is signed (can go negative after the subtraction)
			 * — confirm against the struct definition.
			 */
			if (ptgt->m_reset_delay <= 0) {
				ptgt->m_reset_delay = 0;
				mptsas_set_throttle(mpt, ptgt,
				    MAX_THROTTLE);
				restart++;
			} else {
				done = -1;
			}
		}
	}

	/* Kick the HBA if any target just became runnable again. */
	if (restart > 0) {
		mptsas_restart_hba(mpt);
	}
	return (done);
}
9322
9323 #ifdef MPTSAS_TEST
9324 static void
9325 mptsas_test_reset(mptsas_t *mpt, int target)
9326 {
9327 mptsas_target_t *ptgt = NULL;
9328
9329 if (mptsas_rtest == target) {
9330 if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
9331 mptsas_rtest = -1;
9332 }
9333 if (mptsas_rtest == -1) {
9334 NDBG22(("mptsas_test_reset success"));
9335 }
9336 }
9337 }
9338 #endif
9339
9340 /*
9341 * abort handling:
9342 *
9343 * Notes:
9344 * - if pkt is not NULL, abort just that command
9345 * - if pkt is NULL, abort all outstanding commands for target
9346 */
9347 static int
9348 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9349 {
9350 mptsas_t *mpt = ADDR2MPT(ap);
9351 int rval;
9352 mptsas_tgt_private_t *tgt_private;
9353 int target, lun;
9354
9355 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9356 tran_tgt_private;
9357 ASSERT(tgt_private != NULL);
9358 target = tgt_private->t_private->m_devhdl;
9359 lun = tgt_private->t_lun;
9360
9361 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9362
9363 mutex_enter(&mpt->m_mutex);
9364 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9365 mutex_exit(&mpt->m_mutex);
9366 return (rval);
9367 }
9368
/*
 * Abort one packet (pkt != NULL) or, via an abort-task-set TM request, all
 * outstanding commands for target/lun (pkt == NULL).  A queued packet is
 * simply removed from the waitq; one already in flight is aborted by the
 * firmware.  Returns TRUE/FALSE.  Called with m_mutex held.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap. If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		if (sp->cmd_queued) {
			/* Still on the waitq: no firmware involvement needed. */
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Complete anything the abort flushed onto the doneq. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
9448
9449 /*
9450 * capability handling:
9451 * (*tran_getcap). Get the capability named, and return its value.
9452 */
9453 static int
9454 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9455 {
9456 mptsas_t *mpt = ADDR2MPT(ap);
9457 int ckey;
9458 int rval = FALSE;
9459
9460 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9461 ap->a_target, cap, tgtonly));
9462
9463 mutex_enter(&mpt->m_mutex);
9464
9465 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9466 mutex_exit(&mpt->m_mutex);
9467 return (UNDEFINED);
9468 }
9469
9470 switch (ckey) {
9471 case SCSI_CAP_DMA_MAX:
9472 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9473 break;
9474 case SCSI_CAP_ARQ:
9475 rval = TRUE;
9476 break;
9477 case SCSI_CAP_MSG_OUT:
9478 case SCSI_CAP_PARITY:
9479 case SCSI_CAP_UNTAGGED_QING:
9480 rval = TRUE;
9481 break;
9482 case SCSI_CAP_TAGGED_QING:
9483 rval = TRUE;
9484 break;
9485 case SCSI_CAP_RESET_NOTIFICATION:
9486 rval = TRUE;
9487 break;
9488 case SCSI_CAP_LINKED_CMDS:
9489 rval = FALSE;
9490 break;
9491 case SCSI_CAP_QFULL_RETRIES:
9492 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9493 tran_tgt_private))->t_private->m_qfull_retries;
9494 break;
9495 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9496 rval = drv_hztousec(((mptsas_tgt_private_t *)
9497 (ap->a_hba_tran->tran_tgt_private))->
9498 t_private->m_qfull_retry_interval) / 1000;
9499 break;
9500 case SCSI_CAP_CDB_LEN:
9501 rval = CDB_GROUP4;
9502 break;
9503 case SCSI_CAP_INTERCONNECT_TYPE:
9504 rval = INTERCONNECT_SAS;
9505 break;
9506 case SCSI_CAP_TRAN_LAYER_RETRIES:
9507 if (mpt->m_ioc_capabilities &
9508 MPI2_IOCFACTS_CAPABILITY_TLR)
9509 rval = TRUE;
9510 else
9511 rval = FALSE;
9512 break;
9513 default:
9514 rval = UNDEFINED;
9515 break;
9516 }
9517
9518 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9519
9520 mutex_exit(&mpt->m_mutex);
9521 return (rval);
9522 }
9523
9524 /*
9525 * (*tran_setcap). Set the capability named to the value given.
9526 */
static int
mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
{
	mptsas_t	*mpt = ADDR2MPT(ap);
	int		ckey;
	int		rval = FALSE;

	NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
	    ap->a_target, cap, value, tgtonly));

	/* Capabilities may only be set per-target, never HBA-wide. */
	if (!tgtonly) {
		return (rval);
	}

	mutex_enter(&mpt->m_mutex);

	if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
		mutex_exit(&mpt->m_mutex);
		return (UNDEFINED);
	}

	switch (ckey) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_INITIATOR_ID:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_RESET_NOTIFICATION:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_ARQ:
		/*
		 * We cannot turn off arq so return false if asked to
		 */
		if (value) {
			rval = TRUE;
		} else {
			rval = FALSE;
		}
		break;
	case SCSI_CAP_TAGGED_QING:
		/* Enabling TQ (re)opens the target's throttle fully. */
		mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
		    (ap->a_hba_tran->tran_tgt_private))->t_private,
		    MAX_THROTTLE);
		rval = TRUE;
		break;
	case SCSI_CAP_QFULL_RETRIES:
		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
		    t_private->m_qfull_retries = (uchar_t)value;
		rval = TRUE;
		break;
	case SCSI_CAP_QFULL_RETRY_INTERVAL:
		/* value is in milliseconds; stored as clock ticks. */
		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
		    t_private->m_qfull_retry_interval =
		    drv_usectohz(value * 1000);
		rval = TRUE;
		break;
	default:
		rval = UNDEFINED;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
9595
9596 /*
9597 * Utility routine for mptsas_ifsetcap/ifgetcap
9598 */
9599 /*ARGSUSED*/
9600 static int
9601 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9602 {
9603 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9604
9605 if (!cap)
9606 return (FALSE);
9607
9608 *cidxp = scsi_hba_lookup_capstr(cap);
9609 return (TRUE);
9610 }
9611
9612 static int
9613 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9614 {
9615 mptsas_slots_t *old_active = mpt->m_active;
9616 mptsas_slots_t *new_active;
9617 size_t size;
9618
9619 /*
9620 * if there are active commands, then we cannot
9621 * change size of active slots array.
9622 */
9623 ASSERT(mpt->m_ncmds == 0);
9624
9625 size = MPTSAS_SLOTS_SIZE(mpt);
9626 new_active = kmem_zalloc(size, flag);
9627 if (new_active == NULL) {
9628 NDBG1(("new active alloc failed"));
9629 return (-1);
9630 }
9631 /*
9632 * Since SMID 0 is reserved and the TM slot is reserved, the
9633 * number of slots that can be used at any one time is
9634 * m_max_requests - 2.
9635 */
9636 new_active->m_n_normal = (mpt->m_max_requests - 2);
9637 new_active->m_size = size;
9638 new_active->m_rotor = 1;
9639 if (old_active)
9640 mptsas_free_active_slots(mpt);
9641 mpt->m_active = new_active;
9642
9643 return (0);
9644 }
9645
9646 static void
9647 mptsas_free_active_slots(mptsas_t *mpt)
9648 {
9649 mptsas_slots_t *active = mpt->m_active;
9650 size_t size;
9651
9652 if (active == NULL)
9653 return;
9654 size = active->m_size;
9655 kmem_free(active, size);
9656 mpt->m_active = NULL;
9657 }
9658
9659 /*
9660 * Error logging, printing, and debug print routines.
9661 */
9662 static char *mptsas_label = "mpt_sas";
9663
/*
 * Format a driver message into the shared mptsas_log_buf and emit it
 * via scsi_log() at the given severity (CE_WARN, CE_NOTE, CE_CONT, ...).
 * A NULL mpt (e.g. before attach completes) logs without a device
 * reference.
 */
/*PRINTFLIKE3*/
void
mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
{
	dev_info_t *dev;
	va_list ap;

	if (mpt) {
		dev = mpt->m_dip;
	} else {
		dev = 0;
	}

	/* mptsas_log_buf is shared; the mutex serializes format + output. */
	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	/*
	 * NOTE(review): vsprintf() is unbounded; this assumes no caller
	 * formats more than mptsas_log_buf can hold (buffer is declared
	 * elsewhere in this file) -- confirm its size.
	 */
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

	if (level == CE_CONT) {
		/* Continuation lines get an explicit trailing newline. */
		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
	} else {
		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
	}

	mutex_exit(&mptsas_log_mutex);
}
9691
9692 #ifdef MPTSAS_DEBUG
9693 /*
9694 * Use a circular buffer to log messages to private memory.
9695 * Increment idx atomically to minimize risk to miss lines.
9696 * It's fast and does not hold up the proceedings too much.
9697 */
9698 static const size_t mptsas_dbglog_linecnt = MPTSAS_DBGLOG_LINECNT;
9699 static const size_t mptsas_dbglog_linelen = MPTSAS_DBGLOG_LINELEN;
9700 static char mptsas_dbglog_bufs[MPTSAS_DBGLOG_LINECNT][MPTSAS_DBGLOG_LINELEN];
9701 static uint32_t mptsas_dbglog_idx = 0;
9702
9703 /*PRINTFLIKE1*/
9704 void
9705 mptsas_debug_log(char *fmt, ...)
9706 {
9707 va_list ap;
9708 uint32_t idx;
9709
9710 idx = atomic_inc_32_nv(&mptsas_dbglog_idx) &
9711 (mptsas_dbglog_linecnt - 1);
9712
9713 va_start(ap, fmt);
9714 (void) vsnprintf(mptsas_dbglog_bufs[idx],
9715 mptsas_dbglog_linelen, fmt, ap);
9716 va_end(ap);
9717 }
9718
/*
 * Debug-build console printf: formats into the shared mptsas_log_buf
 * and emits either to the PROM console (when PROM_PRINTF is defined)
 * or through scsi_log() as a continuation message.
 */
/*PRINTFLIKE1*/
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t *dev = 0;
	va_list ap;

	/* Serialize use of the shared log buffer. */
	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	/* NOTE(review): unbounded vsprintf -- same caveat as mptsas_log(). */
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	/* Leading '!' keeps the message out of the console, log only. */
	scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
9739 #endif
9740
9741 /*
9742 * timeout handling
9743 */
9744 static void
9745 mptsas_watch(void *arg)
9746 {
9747 #ifndef __lock_lint
9748 _NOTE(ARGUNUSED(arg))
9749 #endif
9750
9751 mptsas_t *mpt;
9752 uint32_t doorbell;
9753
9754 NDBG30(("mptsas_watch"));
9755
9756 rw_enter(&mptsas_global_rwlock, RW_READER);
9757 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9758
9759 mutex_enter(&mpt->m_mutex);
9760
9761 /* Skip device if not powered on */
9762 if (mpt->m_options & MPTSAS_OPT_PM) {
9763 if (mpt->m_power_level == PM_LEVEL_D0) {
9764 (void) pm_busy_component(mpt->m_dip, 0);
9765 mpt->m_busy = 1;
9766 } else {
9767 mutex_exit(&mpt->m_mutex);
9768 continue;
9769 }
9770 }
9771
9772 /*
9773 * Check if controller is in a FAULT state. If so, reset it.
9774 */
9775 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9776 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9777 doorbell &= MPI2_DOORBELL_DATA_MASK;
9778 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9779 "code: %04x", doorbell);
9780 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9781 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9782 mptsas_log(mpt, CE_WARN, "Reset failed"
9783 "after fault was detected");
9784 }
9785 }
9786
9787 /*
9788 * For now, always call mptsas_watchsubr.
9789 */
9790 mptsas_watchsubr(mpt);
9791
9792 if (mpt->m_options & MPTSAS_OPT_PM) {
9793 mpt->m_busy = 0;
9794 (void) pm_idle_component(mpt->m_dip, 0);
9795 }
9796
9797 mutex_exit(&mpt->m_mutex);
9798 }
9799 rw_exit(&mptsas_global_rwlock);
9800
9801 mutex_enter(&mptsas_global_mutex);
9802 if (mptsas_timeouts_enabled)
9803 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9804 mutex_exit(&mptsas_global_mutex);
9805 }
9806
/*
 * Per-instance watchdog body: scan the active slots for expired
 * passthrough/IOC commands, then walk every target to manage throttle
 * draining and fire timeout recovery when all of a target's commands
 * have expired.  Caller holds mpt->m_mutex.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int i;
	mptsas_cmd_t *cmd;
	mptsas_target_t *ptgt = NULL;
	hrtime_t timestamp = gethrtime();

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 * (hence "<=" rather than "<" below)
	 */
	for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if (cmd->cmd_active_expiration <= timestamp) {
				if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				} else if (cmd->cmd_flags &
				    (CFLAG_PASSTHRU | CFLAG_CONFIG |
				    CFLAG_FW_DIAG)) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					/*
					 * Wake all three waiter classes; each
					 * re-checks its own cmd_flags to see
					 * whether it is the one that expired.
					 */
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		/*
		 * NOTE(review): the queue tail is treated as the earliest
		 * expiration below -- confirm m_active_cmdq ordering.
		 */
		cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
		if (cmd == NULL)
			continue;

		if (cmd->cmd_active_expiration <= timestamp) {
			/*
			 * Earliest command timeout expired. Drain throttle.
			 */
			mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

			/*
			 * Check for remaining commands.
			 */
			cmd = TAILQ_FIRST(&ptgt->m_active_cmdq);
			if (cmd->cmd_active_expiration > timestamp) {
				/*
				 * Wait for remaining commands to complete or
				 * time out.
				 */
				NDBG23(("command timed out, pending drain"));
				continue;
			}

			/*
			 * All command timeouts expired.
			 */
			mptsas_log(mpt, CE_NOTE, "Timeout of %d seconds "
			    "expired with %d commands on target %d lun %d.",
			    cmd->cmd_pkt->pkt_time, ptgt->m_t_ncmds,
			    ptgt->m_devhdl, Lun(cmd));

			mptsas_cmd_timeout(mpt, ptgt);
		} else if (cmd->cmd_active_expiration <=
		    timestamp + (hrtime_t)mptsas_scsi_watchdog_tick * NANOSEC) {
			/* Will expire within the next watchdog tick. */
			NDBG23(("pending timeout"));
			mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
		}
	}
}
9908
9909 /*
9910 * timeout recovery
9911 */
9912 static void
9913 mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt)
9914 {
9915 uint16_t devhdl;
9916 uint64_t sas_wwn;
9917 uint8_t phy;
9918 char wwn_str[MPTSAS_WWN_STRLEN];
9919
9920 devhdl = ptgt->m_devhdl;
9921 sas_wwn = ptgt->m_addr.mta_wwn;
9922 phy = ptgt->m_phynum;
9923 if (sas_wwn == 0) {
9924 (void) sprintf(wwn_str, "p%x", phy);
9925 } else {
9926 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
9927 }
9928
9929 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
9930 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
9931 "target %d %s, enclosure %u", devhdl, wwn_str,
9932 ptgt->m_enclosure);
9933
9934 /*
9935 * Abort all outstanding commands on the device.
9936 */
9937 NDBG29(("mptsas_cmd_timeout: device reset"));
9938 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
9939 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
9940 "recovery failed!", devhdl);
9941 }
9942 }
9943
9944 /*
9945 * Device / Hotplug control
9946 */
9947 static int
9948 mptsas_scsi_quiesce(dev_info_t *dip)
9949 {
9950 mptsas_t *mpt;
9951 scsi_hba_tran_t *tran;
9952
9953 tran = ddi_get_driver_private(dip);
9954 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9955 return (-1);
9956
9957 return (mptsas_quiesce_bus(mpt));
9958 }
9959
9960 static int
9961 mptsas_scsi_unquiesce(dev_info_t *dip)
9962 {
9963 mptsas_t *mpt;
9964 scsi_hba_tran_t *tran;
9965
9966 tran = ddi_get_driver_private(dip);
9967 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9968 return (-1);
9969
9970 return (mptsas_unquiesce_bus(mpt));
9971 }
9972
/*
 * Quiesce the bus: hold every target's throttle, then wait (interruptibly)
 * for outstanding commands to drain.  mptsas_ncmds_checkdrain() re-arms
 * itself and signals m_cv when m_ncmds reaches zero.  Returns 0 once
 * quiesced, -1 if interrupted by a signal.
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 * (signal received): undo the hold, restart
			 * queued I/O, and cancel any pending checkdrain
			 * timeout before returning failure.
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}
			mptsas_restart_hba(mpt);
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				/* Drop the lock: untimeout() may block. */
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
10025
10026 static int
10027 mptsas_unquiesce_bus(mptsas_t *mpt)
10028 {
10029 mptsas_target_t *ptgt = NULL;
10030
10031 NDBG28(("mptsas_unquiesce_bus"));
10032 mutex_enter(&mpt->m_mutex);
10033 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10034 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10035 ptgt = refhash_next(mpt->m_targets, ptgt)) {
10036 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10037 }
10038 mptsas_restart_hba(mpt);
10039 mutex_exit(&mpt->m_mutex);
10040 return (0);
10041 }
10042
10043 static void
10044 mptsas_ncmds_checkdrain(void *arg)
10045 {
10046 mptsas_t *mpt = arg;
10047 mptsas_target_t *ptgt = NULL;
10048
10049 mutex_enter(&mpt->m_mutex);
10050 if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
10051 mpt->m_quiesce_timeid = 0;
10052 if (mpt->m_ncmds == 0) {
10053 /* Command queue has been drained */
10054 cv_signal(&mpt->m_cv);
10055 } else {
10056 /*
10057 * The throttle may have been reset because
10058 * of a SCSI bus reset
10059 */
10060 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10061 ptgt = refhash_next(mpt->m_targets, ptgt)) {
10062 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
10063 }
10064
10065 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
10066 mpt, (MPTSAS_QUIESCE_TIMEOUT *
10067 drv_usectohz(1000000)));
10068 }
10069 }
10070 mutex_exit(&mpt->m_mutex);
10071 }
10072
10073 /*ARGSUSED*/
10074 static void
10075 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10076 {
10077 int i;
10078 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10079 char buf[128];
10080
10081 buf[0] = '\0';
10082 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10083 Tgt(cmd), Lun(cmd)));
10084 (void) sprintf(&buf[0], "\tcdb=[");
10085 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10086 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10087 }
10088 (void) sprintf(&buf[strlen(buf)], " ]");
10089 NDBG25(("?%s\n", buf));
10090 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10091 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10092 cmd->cmd_pkt->pkt_state));
10093 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10094 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10095 }
10096
/*
 * Build the MPI2 simple 64-bit SGEs for a passthrough request: an
 * optional host-to-IOC SGE for the data-out buffer, followed by the
 * terminal SGE for the data(-in) buffer.  The terminal SGE is written
 * even when data_size is zero (zero-length SGE).
 */
static void
mptsas_passthru_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
    pMpi2SGESimple64_t sgep)
{
	uint32_t		sge_flags;
	uint32_t		data_size, dataout_size;
	ddi_dma_cookie_t	data_cookie;
	ddi_dma_cookie_t	dataout_cookie;

	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	if (dataout_size) {
		/* Data-out SGE: host-to-IOC, end-of-buffer, 64-bit address */
		sge_flags = dataout_size |
		    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_HOST_TO_IOC |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);
		ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
		ddi_put32(acc_hdl, &sgep->Address.Low,
		    (uint32_t)(dataout_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &sgep->Address.High,
		    (uint32_t)(dataout_cookie.dmac_laddress
		    >> 32));
		/* Advance to where the terminal data SGE goes. */
		sgep++;
	}
	/* Terminal SGE: last element, end of buffer and of the list. */
	sge_flags = data_size;
	sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER |
	    MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
	    MPI2_SGE_FLAGS_SHIFT);
	if (pt->direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
		    MPI2_SGE_FLAGS_SHIFT);
	} else {
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
		    MPI2_SGE_FLAGS_SHIFT);
	}
	ddi_put32(acc_hdl, &sgep->FlagsLength,
	    sge_flags);
	ddi_put32(acc_hdl, &sgep->Address.Low,
	    (uint32_t)(data_cookie.dmac_laddress &
	    0xffffffffull));
	ddi_put32(acc_hdl, &sgep->Address.High,
	    (uint32_t)(data_cookie.dmac_laddress >> 32));
}
10149
/*
 * Build MPI2.5 IEEE simple 64-bit SGEs for a passthrough request:
 * an optional SGE for the data-out buffer, then the terminal SGE for
 * the data(-in) buffer.  IEEE SGEs carry direction implicitly; only
 * the end-of-list flag distinguishes the last element.
 */
static void
mptsas_passthru_ieee_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
    pMpi2IeeeSgeSimple64_t ieeesgep)
{
	uint8_t			sge_flags;
	uint32_t		data_size, dataout_size;
	ddi_dma_cookie_t	data_cookie;
	ddi_dma_cookie_t	dataout_cookie;

	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	sge_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
	if (dataout_size) {
		ddi_put32(acc_hdl, &ieeesgep->Length, dataout_size);
		ddi_put32(acc_hdl, &ieeesgep->Address.Low,
		    (uint32_t)(dataout_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &ieeesgep->Address.High,
		    (uint32_t)(dataout_cookie.dmac_laddress >> 32));
		ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
		/* Advance to where the terminal data SGE goes. */
		ieeesgep++;
	}
	/* Terminal SGE: mark end of list. */
	sge_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
	ddi_put32(acc_hdl, &ieeesgep->Length, data_size);
	ddi_put32(acc_hdl, &ieeesgep->Address.Low,
	    (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
	ddi_put32(acc_hdl, &ieeesgep->Address.High,
	    (uint32_t)(data_cookie.dmac_laddress >> 32));
	ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
}
10184
10185 static void
10186 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
10187 {
10188 caddr_t memp;
10189 pMPI2RequestHeader_t request_hdrp;
10190 struct scsi_pkt *pkt = cmd->cmd_pkt;
10191 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
10192 uint32_t request_size;
10193 uint32_t request_desc_low, request_desc_high = 0;
10194 uint32_t i, sense_bufp;
10195 uint8_t desc_type;
10196 uint8_t *request, function;
10197 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
10198 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
10199
10200 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10201
10202 request = pt->request;
10203 request_size = pt->request_size;
10204
10205 /*
10206 * Store the passthrough message in memory location
10207 * corresponding to our slot number
10208 */
10209 memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
10210 request_hdrp = (pMPI2RequestHeader_t)memp;
10211 bzero(memp, mpt->m_req_frame_size);
10212
10213 for (i = 0; i < request_size; i++) {
10214 bcopy(request + i, memp + i, 1);
10215 }
10216
10217 NDBG15(("mptsas_start_passthru: Func 0x%x, MsgFlags 0x%x, "
10218 "size=%d, in %d, out %d", request_hdrp->Function,
10219 request_hdrp->MsgFlags, request_size,
10220 pt->data_size, pt->dataout_size));
10221
10222 /*
10223 * Add an SGE, even if the length is zero.
10224 */
10225 if (mpt->m_MPI25 && pt->simple == 0) {
10226 mptsas_passthru_ieee_sge(acc_hdl, pt,
10227 (pMpi2IeeeSgeSimple64_t)
10228 ((uint8_t *)request_hdrp + pt->sgl_offset));
10229 } else {
10230 mptsas_passthru_sge(acc_hdl, pt,
10231 (pMpi2SGESimple64_t)
10232 ((uint8_t *)request_hdrp + pt->sgl_offset));
10233 }
10234
10235 function = request_hdrp->Function;
10236 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10237 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10238 pMpi2SCSIIORequest_t scsi_io_req;
10239
10240 NDBG15(("mptsas_start_passthru: Is SCSI IO Req"));
10241 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
10242 /*
10243 * Put SGE for data and data_out buffer at the end of
10244 * scsi_io_request message header.(64 bytes in total)
10245 * Following above SGEs, the residual space will be
10246 * used by sense data.
10247 */
10248 ddi_put8(acc_hdl,
10249 &scsi_io_req->SenseBufferLength,
10250 (uint8_t)(request_size - 64));
10251
10252 sense_bufp = mpt->m_req_frame_dma_addr +
10253 (mpt->m_req_frame_size * cmd->cmd_slot);
10254 sense_bufp += 64;
10255 ddi_put32(acc_hdl,
10256 &scsi_io_req->SenseBufferLowAddress, sense_bufp);
10257
10258 /*
10259 * Set SGLOffset0 value
10260 */
10261 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
10262 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
10263
10264 /*
10265 * Setup descriptor info. RAID passthrough must use the
10266 * default request descriptor which is already set, so if this
10267 * is a SCSI IO request, change the descriptor to SCSI IO.
10268 */
10269 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
10270 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
10271 request_desc_high = (ddi_get16(acc_hdl,
10272 &scsi_io_req->DevHandle) << 16);
10273 }
10274 }
10275
10276 /*
10277 * We must wait till the message has been completed before
10278 * beginning the next message so we wait for this one to
10279 * finish.
10280 */
10281 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
10282 request_desc_low = (cmd->cmd_slot << 16) + desc_type;
10283 cmd->cmd_rfm = NULL;
10284 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
10285 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
10286 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
10287 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10288 }
10289 }
10290
10291 typedef void (mptsas_pre_f)(mptsas_t *, mptsas_pt_request_t *);
10292 static mptsas_pre_f mpi_pre_ioc_facts;
10293 static mptsas_pre_f mpi_pre_port_facts;
10294 static mptsas_pre_f mpi_pre_fw_download;
10295 static mptsas_pre_f mpi_pre_fw_25_download;
10296 static mptsas_pre_f mpi_pre_fw_upload;
10297 static mptsas_pre_f mpi_pre_fw_25_upload;
10298 static mptsas_pre_f mpi_pre_sata_passthrough;
10299 static mptsas_pre_f mpi_pre_smp_passthrough;
10300 static mptsas_pre_f mpi_pre_config;
10301 static mptsas_pre_f mpi_pre_sas_io_unit_control;
10302 static mptsas_pre_f mpi_pre_scsi_io_req;
10303
/*
 * Prepare the pt for a SAS2 FW_DOWNLOAD request.
 *
 * Validates the embedded transaction context element (TCE) and places
 * the SGL offset just past it.  Size mismatches are logged, not fatal.
 */
static void
mpi_pre_fw_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
	pMpi2FWDownloadTCSGE_t tcsge;
	pMpi2FWDownloadRequest req;

	/*
	 * If SAS3, call separate function.
	 */
	if (mpt->m_MPI25) {
		mpi_pre_fw_25_download(mpt, pt);
		return;
	}

	/*
	 * User requests should come in with the Transaction
	 * context element where the SGL will go. Putting the
	 * SGL after that seems to work, but don't really know
	 * why. Other drivers tend to create an extra SGL and
	 * refer to the TCE through that.
	 */
	req = (pMpi2FWDownloadRequest)pt->request;
	tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
		mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
	}

	pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
	    sizeof (*tcsge);
	if (pt->request_size != pt->sgl_offset)
		NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
		    "0x%x, should be 0x%x, dataoutsz 0x%x",
		    (int)pt->request_size, (int)pt->sgl_offset,
		    (int)pt->dataout_size));
	if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
		NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
		    "0x%x, should be 0x%x", pt->data_size,
		    (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
}
10347
10348 /*
10349 * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10350 */
10351 static void
10352 mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10353 {
10354 pMpi2FWDownloadTCSGE_t tcsge;
10355 pMpi2FWDownloadRequest req2;
10356 pMpi25FWDownloadRequest req25;
10357
10358 /*
10359 * User requests should come in with the Transaction
10360 * context element where the SGL will go. The new firmware
10361 * Doesn't use TCE and has space in the main request for
10362 * this information. So move to the right place.
10363 */
10364 req2 = (pMpi2FWDownloadRequest)pt->request;
10365 req25 = (pMpi25FWDownloadRequest)pt->request;
10366 tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10367 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10368 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10369 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10370 }
10371 req25->ImageOffset = tcsge->ImageOffset;
10372 req25->ImageSize = tcsge->ImageSize;
10373
10374 pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10375 if (pt->request_size != pt->sgl_offset)
10376 NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10377 "0x%x, should be 0x%x, dataoutsz 0x%x",
10378 pt->request_size, pt->sgl_offset,
10379 pt->dataout_size));
10380 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
10381 NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10382 "0x%x, should be 0x%x", pt->data_size,
10383 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10384 }
10385
/*
 * Prepare the pt for a SAS2 FW_UPLOAD request.
 *
 * Validates the embedded transaction context element (TCE) and places
 * the SGL offset just past it.  Size mismatches are logged, not fatal.
 */
static void
mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
	pMpi2FWUploadTCSGE_t tcsge;
	pMpi2FWUploadRequest_t req;

	/*
	 * If SAS3, call separate function.
	 */
	if (mpt->m_MPI25) {
		mpi_pre_fw_25_upload(mpt, pt);
		return;
	}

	/*
	 * User requests should come in with the Transaction
	 * context element where the SGL will go. Putting the
	 * SGL after that seems to work, but don't really know
	 * why. Other drivers tend to create an extra SGL and
	 * refer to the TCE through that.
	 */
	req = (pMpi2FWUploadRequest_t)pt->request;
	tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
		mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
	}

	pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
	    sizeof (*tcsge);
	if (pt->request_size != pt->sgl_offset)
		NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
		    "0x%x, should be 0x%x, dataoutsz 0x%x",
		    pt->request_size, pt->sgl_offset,
		    pt->dataout_size));
	if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
		NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
		    "0x%x, should be 0x%x", pt->data_size,
		    (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
}
10429
/*
 * Prepare the pt a SAS3 FW_UPLOAD request.
 *
 * MPI2.5 has no TCE; copy the image offset/size from the user-supplied
 * TCE into the native MPI2.5 request fields (req2/req25 alias the same
 * buffer), then set the SGL offset.
 */
static void
mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
	pMpi2FWUploadTCSGE_t tcsge;
	pMpi2FWUploadRequest_t req2;
	pMpi25FWUploadRequest_t req25;

	/*
	 * User requests should come in with the Transaction
	 * context element where the SGL will go. The new firmware
	 * Doesn't use TCE and has space in the main request for
	 * this information. So move to the right place.
	 */
	req2 = (pMpi2FWUploadRequest_t)pt->request;
	req25 = (pMpi25FWUploadRequest_t)pt->request;
	tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
		mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
	}
	req25->ImageOffset = tcsge->ImageOffset;
	req25->ImageSize = tcsge->ImageSize;

	pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
	if (pt->request_size != pt->sgl_offset)
		NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
		    "0x%x, should be 0x%x, dataoutsz 0x%x",
		    pt->request_size, pt->sgl_offset,
		    pt->dataout_size));
	if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
		NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
		    "0x%x, should be 0x%x", pt->data_size,
		    (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
}
10467
10468 /*
10469 * Prepare the pt for an IOC_FACTS request.
10470 */
10471 static void
10472 mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10473 {
10474 #ifndef __lock_lint
10475 _NOTE(ARGUNUSED(mpt))
10476 #endif
10477 if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST))
10478 NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
10479 "0x%x, should be 0x%x, dataoutsz 0x%x",
10480 pt->request_size,
10481 (int)sizeof (MPI2_IOC_FACTS_REQUEST),
10482 pt->dataout_size));
10483 if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY))
10484 NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
10485 "0x%x, should be 0x%x", pt->data_size,
10486 (int)sizeof (MPI2_IOC_FACTS_REPLY)));
10487 pt->sgl_offset = (uint16_t)pt->request_size;
10488 }
10489
10490 /*
10491 * Prepare the pt for a PORT_FACTS request.
10492 */
10493 static void
10494 mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10495 {
10496 #ifndef __lock_lint
10497 _NOTE(ARGUNUSED(mpt))
10498 #endif
10499 if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST))
10500 NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
10501 "0x%x, should be 0x%x, dataoutsz 0x%x",
10502 pt->request_size,
10503 (int)sizeof (MPI2_PORT_FACTS_REQUEST),
10504 pt->dataout_size));
10505 if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY))
10506 NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
10507 "0x%x, should be 0x%x", pt->data_size,
10508 (int)sizeof (MPI2_PORT_FACTS_REPLY)));
10509 pt->sgl_offset = (uint16_t)pt->request_size;
10510 }
10511
10512 /*
10513 * Prepare pt for a SATA_PASSTHROUGH request.
10514 */
10515 static void
10516 mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10517 {
10518 #ifndef __lock_lint
10519 _NOTE(ARGUNUSED(mpt))
10520 #endif
10521 pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
10522 if (pt->request_size != pt->sgl_offset)
10523 NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
10524 "0x%x, should be 0x%x, dataoutsz 0x%x",
10525 pt->request_size, pt->sgl_offset,
10526 pt->dataout_size));
10527 if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY))
10528 NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
10529 "0x%x, should be 0x%x", pt->data_size,
10530 (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
10531 }
10532
10533 static void
10534 mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10535 {
10536 #ifndef __lock_lint
10537 _NOTE(ARGUNUSED(mpt))
10538 #endif
10539 pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
10540 if (pt->request_size != pt->sgl_offset)
10541 NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
10542 "0x%x, should be 0x%x, dataoutsz 0x%x",
10543 pt->request_size, pt->sgl_offset,
10544 pt->dataout_size));
10545 if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY))
10546 NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
10547 "0x%x, should be 0x%x", pt->data_size,
10548 (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
10549 }
10550
10551 /*
10552 * Prepare pt for a CONFIG request.
10553 */
10554 static void
10555 mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
10556 {
10557 #ifndef __lock_lint
10558 _NOTE(ARGUNUSED(mpt))
10559 #endif
10560 pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
10561 if (pt->request_size != pt->sgl_offset)
10562 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10563 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10564 pt->sgl_offset, pt->dataout_size));
10565 if (pt->data_size != sizeof (MPI2_CONFIG_REPLY))
10566 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10567 "should be 0x%x", pt->data_size,
10568 (int)sizeof (MPI2_CONFIG_REPLY)));
10569 pt->simple = 1;
10570 }
10571
10572 /*
10573 * Prepare pt for a SCSI_IO_REQ request.
10574 */
10575 static void
10576 mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
10577 {
10578 #ifndef __lock_lint
10579 _NOTE(ARGUNUSED(mpt))
10580 #endif
10581 pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
10582 if (pt->request_size != pt->sgl_offset)
10583 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10584 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10585 pt->sgl_offset,
10586 pt->dataout_size));
10587 if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY))
10588 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10589 "should be 0x%x", pt->data_size,
10590 (int)sizeof (MPI2_SCSI_IO_REPLY)));
10591 }
10592
10593 /*
10594 * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
10595 */
10596 static void
10597 mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
10598 {
10599 #ifndef __lock_lint
10600 _NOTE(ARGUNUSED(mpt))
10601 #endif
10602 pt->sgl_offset = (uint16_t)pt->request_size;
10603 }
10604
/*
 * A set of functions to prepare an mptsas_cmd for the various
 * supported requests.  Dispatch table: mptsas_prep_sgl_offset() scans
 * it by MPI2 function code; the 0xFF entry terminates the list.
 */
static struct mptsas_func {
	U8		Function;	/* MPI2 function code to match */
	char		*Name;		/* human-readable name for debug */
	mptsas_pre_f	*f_pre;		/* per-function prepare routine */
} mptsas_func_list[] = {
	{ MPI2_FUNCTION_IOC_FACTS, "IOC_FACTS",		mpi_pre_ioc_facts },
	{ MPI2_FUNCTION_PORT_FACTS, "PORT_FACTS",	mpi_pre_port_facts },
	{ MPI2_FUNCTION_FW_DOWNLOAD, "FW_DOWNLOAD",	mpi_pre_fw_download },
	{ MPI2_FUNCTION_FW_UPLOAD, "FW_UPLOAD",		mpi_pre_fw_upload },
	{ MPI2_FUNCTION_SATA_PASSTHROUGH, "SATA_PASSTHROUGH",
	    mpi_pre_sata_passthrough },
	{ MPI2_FUNCTION_SMP_PASSTHROUGH, "SMP_PASSTHROUGH",
	    mpi_pre_smp_passthrough},
	{ MPI2_FUNCTION_SCSI_IO_REQUEST, "SCSI_IO_REQUEST",
	    mpi_pre_scsi_io_req},
	{ MPI2_FUNCTION_CONFIG, "CONFIG",		mpi_pre_config},
	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, "SAS_IO_UNIT_CONTROL",
	    mpi_pre_sas_io_unit_control },
	{ 0xFF, NULL,					NULL } /* list end */
};
10629
10630 static void
10631 mptsas_prep_sgl_offset(mptsas_t *mpt, mptsas_pt_request_t *pt)
10632 {
10633 pMPI2RequestHeader_t hdr;
10634 struct mptsas_func *f;
10635
10636 hdr = (pMPI2RequestHeader_t)pt->request;
10637
10638 for (f = mptsas_func_list; f->f_pre != NULL; f++) {
10639 if (hdr->Function == f->Function) {
10640 f->f_pre(mpt, pt);
10641 NDBG15(("mptsas_prep_sgl_offset: Function %s,"
10642 " sgl_offset 0x%x", f->Name,
10643 pt->sgl_offset));
10644 return;
10645 }
10646 }
10647 NDBG15(("mptsas_prep_sgl_offset: Unknown Function 0x%02x,"
10648 " returning req_size 0x%x for sgl_offset",
10649 hdr->Function, pt->request_size));
10650 pt->sgl_offset = (uint16_t)pt->request_size;
10651 }
10652
10653
/*
 * Core passthrough engine for user-initiated MPI2 requests.
 *
 * Copies the caller's request frame in from user space, allocates DMA
 * buffers for any data-in/data-out transfer, builds an internal
 * cmd/pkt pair, submits it to the IOC (or queues it on the waitq),
 * waits for completion on m_passthru_cv, then copies the reply frame,
 * any sense data (SCSI IO only), and any read data back out.
 *
 * Must be entered with mpt->m_mutex held (asserted below); the mutex
 * is dropped and re-acquired around every ddi_copyin/ddi_copyout.
 * Cleanup on all paths is driven by the pt_flags bitmask via the
 * common "out:" label.
 *
 * Returns 0 on success or an errno value (EFAULT/EIO/ENOMEM/EAGAIN/
 * ETIMEDOUT) on failure.
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint32_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t		pt;
	mptsas_dma_alloc_state_t	data_dma_state;
	mptsas_dma_alloc_state_t	dataout_dma_state;
	caddr_t				memp;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	uint32_t			reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t		request_hdrp;
	pMPI2RequestHeader_t		request_msg;
	pMPI2DefaultReply_t		reply_msg;
	Mpi2SCSIIOReply_t		rep_msg;
	int				i, status = 0, pt_flags = 0, rv = 0;
	int				rvalue;
	uint8_t				function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Default the reply to the zeroed on-stack frame; it is
	 * repointed at the real reply frame later if the completion is
	 * an ADDRESS reply.
	 */
	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	mutex_enter(&mpt->m_mutex);

	/*
	 * Task management requests are handled synchronously by a
	 * dedicated path rather than the generic passthrough machinery.
	 */
	function = request_msg->Function;
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		pMpi2SCSITaskManagementRequest_t	task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	/*
	 * Allocate the data-in DMA buffer; for a WRITE direction the
	 * user data is copied into it now (byte at a time, with the
	 * mutex dropped for the copyin).
	 */
	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	} else {
		bzero(&data_dma_state, sizeof (data_dma_state));
	}

	/* Allocate and fill the data-out DMA buffer, if any. */
	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	} else {
		bzero(&dataout_dma_state, sizeof (dataout_dma_state));
	}

	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	/*
	 * Describe the passthrough; mptsas_prep_sgl_offset() fills in
	 * sgl_offset (and possibly pt.simple) based on the function
	 * code in the request frame.
	 */
	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.simple = 0;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;
	mptsas_prep_sgl_offset(mpt, &pt);

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp		= (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp		= (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private	= (opaque_t)&pt;
	pkt->pkt_flags		= FLAG_HEAD;
	pkt->pkt_time		= timeout;
	cmd->cmd_pkt		= pkt;
	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the completion path sets CFLAG_FINISHED. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	/*
	 * Locate the request frame actually used so the reply copyout
	 * path can inspect the function code (and sense data area).
	 */
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    mpt->m_reply_frame_dma_addr));
	}

	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		/*
		 * For SCSI IO (and RAID SCSI IO passthrough) the caller's
		 * reply buffer holds the fixed reply followed by sense
		 * data; for everything else the full reply_size is the
		 * reply frame.
		 * NOTE(review): if reply_size < sizeof (MPI2_SCSI_IO_REPLY)
		 * the unsigned subtraction below would wrap; presumably
		 * the ioctl layer guarantees a large enough buffer —
		 * TODO confirm.
		 */
		function = request_hdrp->Function;
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			sense_len = reply_size - reply_len;
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		/*
		 * Sense bytes are read from 64 bytes into the request
		 * frame slot — assumed to be where the sense area for
		 * this command lives; TODO confirm against the frame
		 * layout set up by mptsas_start_passthru().
		 */
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	/* Copy any read (data-in) payload back to the caller. */
	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i, 1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * mptsas_remove_cmd() also returns the cmd to the pool, so the
	 * REQUEST_POOL_CMD flag is cleared to avoid a double return.
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	/* A timed-out command leaves the IOC in an unknown state; reset it. */
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);

	return (status);
}
10965
10966 static int
10967 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10968 {
10969 /*
10970 * If timeout is 0, set timeout to default of 60 seconds.
10971 */
10972 if (data->Timeout == 0) {
10973 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10974 }
10975
10976 if (((data->DataSize == 0) &&
10977 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10978 ((data->DataSize != 0) &&
10979 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10980 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10981 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10982 (data->DataOutSize != 0))))) {
10983 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10984 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10985 } else {
10986 data->DataOutSize = 0;
10987 }
10988 /*
10989 * Send passthru request messages
10990 */
10991 return (mptsas_do_passthru(mpt,
10992 (uint8_t *)((uintptr_t)data->PtrRequest),
10993 (uint8_t *)((uintptr_t)data->PtrReply),
10994 (uint8_t *)((uintptr_t)data->PtrData),
10995 data->RequestSize, data->ReplySize,
10996 data->DataSize, data->DataDirection,
10997 (uint8_t *)((uintptr_t)data->PtrDataOut),
10998 data->DataOutSize, data->Timeout, mode));
10999 } else {
11000 return (EINVAL);
11001 }
11002 }
11003
11004 static uint8_t
11005 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
11006 {
11007 uint8_t index;
11008
11009 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
11010 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
11011 return (index);
11012 }
11013 }
11014
11015 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
11016 }
11017
/*
 * Build and issue a FW diagnostic request (DIAG_BUFFER_POST or
 * DIAG_RELEASE) for the command's slot.
 *
 * The mptsas_diag_request_t hung off pkt_ha_private selects which
 * message to construct; the request frame is filled in via ddi_put*
 * through the request-frame access handle, synced for the device, and
 * started with MPTSAS_START_CMD.  Must be called with m_mutex held.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
	struct scsi_pkt			*pkt = cmd->cmd_pkt;
	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
	uint32_t			request_desc_low, i;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* Copy the caller's product-specific words, 32 bits each. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* Split the 64-bit DMA address into low/high words. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	/* Request descriptor: slot in the upper 16 bits, default type. */
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
11089
/*
 * Post a FW diagnostic buffer to the IOC.
 *
 * Builds a DIAG_BUFFER_POST request for pBuffer, issues it through the
 * internal command machinery (mptsas_start_diag), waits on
 * m_fw_diag_cv for completion, and inspects the reply's IOCStatus.  On
 * success the buffer is marked valid and firmware-owned.
 *
 * *return_code is set to an MPTSAS_FW_DIAG_ERROR_* value for the
 * caller's ioctl reply; the function itself returns DDI_SUCCESS or
 * DDI_FAILURE.  Called with m_mutex held (cv_wait below relies on it).
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t		diag;
	int				status, slot_num, post_flags = 0;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	pMpi2DiagBufferPostReply_t	reply;
	uint16_t			iocstatus;
	uint32_t			iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* mptsas_start_diag() reads this via pkt_ha_private. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private	= (opaque_t)&diag;
	pkt->pkt_flags		= FLAG_HEAD;
	pkt->pkt_time		= 60;
	cmd->cmd_pkt		= pkt;
	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Wait for the completion path to signal CFLAG_FINISHED. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* mptsas_remove_cmd() returns the cmd; avoid a double return. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11239
/*
 * Ask the firmware to release a previously posted diagnostic buffer.
 *
 * Builds a DIAG_RELEASE request for pBuffer, issues it through the
 * internal command machinery, waits on m_fw_diag_cv, and checks the
 * reply's IOCStatus.  On success, for an UNREGISTER operation the
 * buffer's unique ID is also invalidated so it can be re-registered.
 *
 * *return_code receives an MPTSAS_FW_DIAG_ERROR_* value; the function
 * returns DDI_SUCCESS/DDI_FAILURE.  Called with m_mutex held.
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t	diag;
	int			status, slot_num, rel_flags = 0;
	mptsas_cmd_t		*cmd = NULL;
	struct scsi_pkt		*pkt;
	pMpi2DiagReleaseReply_t	reply;
	uint16_t		iocstatus;
	uint32_t		iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 * NOTE(review): owned_by_firmware is cleared here, which makes
	 * the post-reply "|| pBuffer->owned_by_firmware" check below
	 * always false unless something re-sets it while we wait —
	 * looks like dead logic inherited upstream; confirm before
	 * changing.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* mptsas_start_diag() reads this via pkt_ha_private. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private	= (opaque_t)&diag;
	pkt->pkt_flags		= FLAG_HEAD;
	pkt->pkt_time		= 60;
	cmd->cmd_pkt		= pkt;
	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Wait for the completion path to signal CFLAG_FINISHED. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* mptsas_remove_cmd() returns the cmd; avoid a double return. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11398
/*
 * Register a FW diagnostic buffer on behalf of a user ioctl.
 *
 * Validates the buffer type and unique ID, allocates a contiguous DMA
 * buffer of the requested size, copies the caller's parameters into
 * the driver's per-type buffer descriptor, and posts the buffer to the
 * firmware via mptsas_post_fw_diag_buffer().  If anything fails after
 * allocation, the DMA buffer is freed before returning.
 *
 * *return_code receives an MPTSAS_FW_DIAG_ERROR_* value; the function
 * returns DDI_SUCCESS/DDI_FAILURE.  Requires m_mutex (asserted).
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				extended_type, buffer_type, i;
	uint32_t			buffer_size;
	uint32_t			unique_id;
	int				status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	extended_type = diag_register->ExtendedType;
	buffer_type = diag_register->BufferType;
	buffer_size = diag_register->RequestedBufferSize;
	unique_id = diag_register->UniqueId;

	/*
	 * Check for valid buffer type
	 */
	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
		return (DDI_FAILURE);
	}

	/*
	 * Get the current buffer and look up the unique ID.  The unique ID
	 * should not be found.  If it is, the ID is already in use.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * The buffer's unique ID should not be registered yet, and the given
	 * unique ID cannot be 0.
	 */
	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * If this buffer is already posted as immediate, just change owner.
	 */
	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		pBuffer->immediate = FALSE;
		pBuffer->unique_id = unique_id;
		return (DDI_SUCCESS);
	}

	/*
	 * Post a new buffer after checking if it's enabled.  The DMA buffer
	 * that is allocated will be contiguous (sgl_len = 1).
	 */
	if (!pBuffer->enabled) {
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}
	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
	pBuffer->buffer_data.size = buffer_size;
	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
		    "diag buffer: size = %d bytes", buffer_size);
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}

	/*
	 * Copy the given info to the diag buffer and post the buffer.
	 */
	pBuffer->buffer_type = buffer_type;
	pBuffer->immediate = FALSE;
	/* Product-specific words only apply to TRACE buffers. */
	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
		    i++) {
			pBuffer->product_specific[i] =
			    diag_register->ProductSpecific[i];
		}
	}
	pBuffer->extended_type = extended_type;
	pBuffer->unique_id = unique_id;
	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
	    DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
		    "mptsas_diag_register.");
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		status = DDI_FAILURE;
	}

	/*
	 * In case there was a failure, free the DMA buffer.
	 */
	if (status == DDI_FAILURE) {
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}
11505
11506 static int
11507 mptsas_diag_unregister(mptsas_t *mpt,
11508 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
11509 {
11510 mptsas_fw_diagnostic_buffer_t *pBuffer;
11511 uint8_t i;
11512 uint32_t unique_id;
11513 int status;
11514
11515 ASSERT(mutex_owned(&mpt->m_mutex));
11516
11517 unique_id = diag_unregister->UniqueId;
11518
11519 /*
11520 * Get the current buffer and look up the unique ID. The unique ID
11521 * should be there.
11522 */
11523 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11524 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11525 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11526 return (DDI_FAILURE);
11527 }
11528
11529 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11530
11531 /*
11532 * Try to release the buffer from FW before freeing it. If release
11533 * fails, don't free the DMA buffer in case FW tries to access it
11534 * later. If buffer is not owned by firmware, can't release it.
11535 */
11536 if (!pBuffer->owned_by_firmware) {
11537 status = DDI_SUCCESS;
11538 } else {
11539 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
11540 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
11541 }
11542
11543 /*
11544 * At this point, return the current status no matter what happens with
11545 * the DMA buffer.
11546 */
11547 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
11548 if (status == DDI_SUCCESS) {
11549 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
11550 DDI_SUCCESS) {
11551 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
11552 "in mptsas_diag_unregister.");
11553 ddi_fm_service_impact(mpt->m_dip,
11554 DDI_SERVICE_UNAFFECTED);
11555 }
11556 mptsas_dma_free(&pBuffer->buffer_data);
11557 }
11558
11559 return (status);
11560 }
11561
11562 static int
11563 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
11564 uint32_t *return_code)
11565 {
11566 mptsas_fw_diagnostic_buffer_t *pBuffer;
11567 uint8_t i;
11568 uint32_t unique_id;
11569
11570 ASSERT(mutex_owned(&mpt->m_mutex));
11571
11572 unique_id = diag_query->UniqueId;
11573
11574 /*
11575 * If ID is valid, query on ID.
11576 * If ID is invalid, query on buffer type.
11577 */
11578 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
11579 i = diag_query->BufferType;
11580 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
11581 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11582 return (DDI_FAILURE);
11583 }
11584 } else {
11585 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11586 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11587 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11588 return (DDI_FAILURE);
11589 }
11590 }
11591
11592 /*
11593 * Fill query structure with the diag buffer info.
11594 */
11595 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11596 diag_query->BufferType = pBuffer->buffer_type;
11597 diag_query->ExtendedType = pBuffer->extended_type;
11598 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
11599 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
11600 i++) {
11601 diag_query->ProductSpecific[i] =
11602 pBuffer->product_specific[i];
11603 }
11604 }
11605 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
11606 diag_query->DriverAddedBufferSize = 0;
11607 diag_query->UniqueId = pBuffer->unique_id;
11608 diag_query->ApplicationFlags = 0;
11609 diag_query->DiagnosticFlags = 0;
11610
11611 /*
11612 * Set/Clear application flags
11613 */
11614 if (pBuffer->immediate) {
11615 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11616 } else {
11617 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11618 }
11619 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
11620 diag_query->ApplicationFlags |=
11621 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11622 } else {
11623 diag_query->ApplicationFlags &=
11624 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11625 }
11626 if (pBuffer->owned_by_firmware) {
11627 diag_query->ApplicationFlags |=
11628 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11629 } else {
11630 diag_query->ApplicationFlags &=
11631 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11632 }
11633
11634 return (DDI_SUCCESS);
11635 }
11636
11637 static int
11638 mptsas_diag_read_buffer(mptsas_t *mpt,
11639 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
11640 uint32_t *return_code, int ioctl_mode)
11641 {
11642 mptsas_fw_diagnostic_buffer_t *pBuffer;
11643 uint8_t i, *pData;
11644 uint32_t unique_id, byte;
11645 int status;
11646
11647 ASSERT(mutex_owned(&mpt->m_mutex));
11648
11649 unique_id = diag_read_buffer->UniqueId;
11650
11651 /*
11652 * Get the current buffer and look up the unique ID. The unique ID
11653 * should be there.
11654 */
11655 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11656 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11657 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11658 return (DDI_FAILURE);
11659 }
11660
11661 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11662
11663 /*
11664 * Make sure requested read is within limits
11665 */
11666 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
11667 pBuffer->buffer_data.size) {
11668 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11669 return (DDI_FAILURE);
11670 }
11671
11672 /*
11673 * Copy the requested data from DMA to the diag_read_buffer. The DMA
11674 * buffer that was allocated is one contiguous buffer.
11675 */
11676 pData = (uint8_t *)(pBuffer->buffer_data.memp +
11677 diag_read_buffer->StartingOffset);
11678 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
11679 DDI_DMA_SYNC_FORCPU);
11680 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
11681 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
11682 != 0) {
11683 return (DDI_FAILURE);
11684 }
11685 }
11686 diag_read_buffer->Status = 0;
11687
11688 /*
11689 * Set or clear the Force Release flag.
11690 */
11691 if (pBuffer->force_release) {
11692 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11693 } else {
11694 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11695 }
11696
11697 /*
11698 * If buffer is to be reregistered, make sure it's not already owned by
11699 * firmware first.
11700 */
11701 status = DDI_SUCCESS;
11702 if (!pBuffer->owned_by_firmware) {
11703 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
11704 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
11705 return_code);
11706 }
11707 }
11708
11709 return (status);
11710 }
11711
11712 static int
11713 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
11714 uint32_t *return_code)
11715 {
11716 mptsas_fw_diagnostic_buffer_t *pBuffer;
11717 uint8_t i;
11718 uint32_t unique_id;
11719 int status;
11720
11721 ASSERT(mutex_owned(&mpt->m_mutex));
11722
11723 unique_id = diag_release->UniqueId;
11724
11725 /*
11726 * Get the current buffer and look up the unique ID. The unique ID
11727 * should be there.
11728 */
11729 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11730 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11731 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11732 return (DDI_FAILURE);
11733 }
11734
11735 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11736
11737 /*
11738 * If buffer is not owned by firmware, it's already been released.
11739 */
11740 if (!pBuffer->owned_by_firmware) {
11741 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
11742 return (DDI_FAILURE);
11743 }
11744
11745 /*
11746 * Release the buffer.
11747 */
11748 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
11749 MPTSAS_FW_DIAG_TYPE_RELEASE);
11750 return (status);
11751 }
11752
11753 static int
11754 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
11755 uint32_t length, uint32_t *return_code, int ioctl_mode)
11756 {
11757 mptsas_fw_diag_register_t diag_register;
11758 mptsas_fw_diag_unregister_t diag_unregister;
11759 mptsas_fw_diag_query_t diag_query;
11760 mptsas_diag_read_buffer_t diag_read_buffer;
11761 mptsas_fw_diag_release_t diag_release;
11762 int status = DDI_SUCCESS;
11763 uint32_t original_return_code, read_buf_len;
11764
11765 ASSERT(mutex_owned(&mpt->m_mutex));
11766
11767 original_return_code = *return_code;
11768 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11769
11770 switch (action) {
11771 case MPTSAS_FW_DIAG_TYPE_REGISTER:
11772 if (!length) {
11773 *return_code =
11774 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11775 status = DDI_FAILURE;
11776 break;
11777 }
11778 if (ddi_copyin(diag_action, &diag_register,
11779 sizeof (diag_register), ioctl_mode) != 0) {
11780 return (DDI_FAILURE);
11781 }
11782 status = mptsas_diag_register(mpt, &diag_register,
11783 return_code);
11784 break;
11785
11786 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
11787 if (length < sizeof (diag_unregister)) {
11788 *return_code =
11789 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11790 status = DDI_FAILURE;
11791 break;
11792 }
11793 if (ddi_copyin(diag_action, &diag_unregister,
11794 sizeof (diag_unregister), ioctl_mode) != 0) {
11795 return (DDI_FAILURE);
11796 }
11797 status = mptsas_diag_unregister(mpt, &diag_unregister,
11798 return_code);
11799 break;
11800
11801 case MPTSAS_FW_DIAG_TYPE_QUERY:
11802 if (length < sizeof (diag_query)) {
11803 *return_code =
11804 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11805 status = DDI_FAILURE;
11806 break;
11807 }
11808 if (ddi_copyin(diag_action, &diag_query,
11809 sizeof (diag_query), ioctl_mode) != 0) {
11810 return (DDI_FAILURE);
11811 }
11812 status = mptsas_diag_query(mpt, &diag_query,
11813 return_code);
11814 if (status == DDI_SUCCESS) {
11815 if (ddi_copyout(&diag_query, diag_action,
11816 sizeof (diag_query), ioctl_mode) != 0) {
11817 return (DDI_FAILURE);
11818 }
11819 }
11820 break;
11821
11822 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
11823 if (ddi_copyin(diag_action, &diag_read_buffer,
11824 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
11825 return (DDI_FAILURE);
11826 }
11827 read_buf_len = sizeof (diag_read_buffer) -
11828 sizeof (diag_read_buffer.DataBuffer) +
11829 diag_read_buffer.BytesToRead;
11830 if (length < read_buf_len) {
11831 *return_code =
11832 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11833 status = DDI_FAILURE;
11834 break;
11835 }
11836 status = mptsas_diag_read_buffer(mpt,
11837 &diag_read_buffer, diag_action +
11838 sizeof (diag_read_buffer) - 4, return_code,
11839 ioctl_mode);
11840 if (status == DDI_SUCCESS) {
11841 if (ddi_copyout(&diag_read_buffer, diag_action,
11842 sizeof (diag_read_buffer) - 4, ioctl_mode)
11843 != 0) {
11844 return (DDI_FAILURE);
11845 }
11846 }
11847 break;
11848
11849 case MPTSAS_FW_DIAG_TYPE_RELEASE:
11850 if (length < sizeof (diag_release)) {
11851 *return_code =
11852 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11853 status = DDI_FAILURE;
11854 break;
11855 }
11856 if (ddi_copyin(diag_action, &diag_release,
11857 sizeof (diag_release), ioctl_mode) != 0) {
11858 return (DDI_FAILURE);
11859 }
11860 status = mptsas_diag_release(mpt, &diag_release,
11861 return_code);
11862 break;
11863
11864 default:
11865 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11866 status = DDI_FAILURE;
11867 break;
11868 }
11869
11870 if ((status == DDI_FAILURE) &&
11871 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
11872 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
11873 status = DDI_SUCCESS;
11874 }
11875
11876 return (status);
11877 }
11878
11879 static int
11880 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
11881 {
11882 int status;
11883 mptsas_diag_action_t driver_data;
11884
11885 ASSERT(mutex_owned(&mpt->m_mutex));
11886
11887 /*
11888 * Copy the user data to a driver data buffer.
11889 */
11890 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
11891 mode) == 0) {
11892 /*
11893 * Send diag action request if Action is valid
11894 */
11895 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
11896 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
11897 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
11898 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
11899 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
11900 status = mptsas_do_diag_action(mpt, driver_data.Action,
11901 (void *)(uintptr_t)driver_data.PtrDiagAction,
11902 driver_data.Length, &driver_data.ReturnCode,
11903 mode);
11904 if (status == DDI_SUCCESS) {
11905 if (ddi_copyout(&driver_data.ReturnCode,
11906 &user_data->ReturnCode,
11907 sizeof (user_data->ReturnCode), mode)
11908 != 0) {
11909 status = EFAULT;
11910 } else {
11911 status = 0;
11912 }
11913 } else {
11914 status = EIO;
11915 }
11916 } else {
11917 status = EINVAL;
11918 }
11919 } else {
11920 status = EFAULT;
11921 }
11922
11923 return (status);
11924 }
11925
11926 /*
11927 * This routine handles the "event query" ioctl.
11928 */
11929 static int
11930 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11931 int *rval)
11932 {
11933 int status;
11934 mptsas_event_query_t driverdata;
11935 uint8_t i;
11936
11937 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11938
11939 mutex_enter(&mpt->m_mutex);
11940 for (i = 0; i < 4; i++) {
11941 driverdata.Types[i] = mpt->m_event_mask[i];
11942 }
11943 mutex_exit(&mpt->m_mutex);
11944
11945 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11946 status = EFAULT;
11947 } else {
11948 *rval = MPTIOCTL_STATUS_GOOD;
11949 status = 0;
11950 }
11951
11952 return (status);
11953 }
11954
11955 /*
11956 * This routine handles the "event enable" ioctl.
11957 */
11958 static int
11959 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11960 int *rval)
11961 {
11962 int status;
11963 mptsas_event_enable_t driverdata;
11964 uint8_t i;
11965
11966 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11967 mutex_enter(&mpt->m_mutex);
11968 for (i = 0; i < 4; i++) {
11969 mpt->m_event_mask[i] = driverdata.Types[i];
11970 }
11971 mutex_exit(&mpt->m_mutex);
11972
11973 *rval = MPTIOCTL_STATUS_GOOD;
11974 status = 0;
11975 } else {
11976 status = EFAULT;
11977 }
11978 return (status);
11979 }
11980
11981 /*
11982 * This routine handles the "event report" ioctl.
11983 */
11984 static int
11985 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
11986 int *rval)
11987 {
11988 int status;
11989 mptsas_event_report_t driverdata;
11990
11991 mutex_enter(&mpt->m_mutex);
11992
11993 if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
11994 mode) == 0) {
11995 if (driverdata.Size >= sizeof (mpt->m_events)) {
11996 if (ddi_copyout(mpt->m_events, data->Events,
11997 sizeof (mpt->m_events), mode) != 0) {
11998 status = EFAULT;
11999 } else {
12000 if (driverdata.Size > sizeof (mpt->m_events)) {
12001 driverdata.Size =
12002 sizeof (mpt->m_events);
12003 if (ddi_copyout(&driverdata.Size,
12004 &data->Size,
12005 sizeof (driverdata.Size),
12006 mode) != 0) {
12007 status = EFAULT;
12008 } else {
12009 *rval = MPTIOCTL_STATUS_GOOD;
12010 status = 0;
12011 }
12012 } else {
12013 *rval = MPTIOCTL_STATUS_GOOD;
12014 status = 0;
12015 }
12016 }
12017 } else {
12018 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
12019 status = 0;
12020 }
12021 } else {
12022 status = EFAULT;
12023 }
12024
12025 mutex_exit(&mpt->m_mutex);
12026 return (status);
12027 }
12028
12029 static void
12030 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12031 {
12032 int *reg_data;
12033 uint_t reglen;
12034
12035 /*
12036 * Lookup the 'reg' property and extract the other data
12037 */
12038 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12039 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
12040 DDI_PROP_SUCCESS) {
12041 /*
12042 * Extract the PCI data from the 'reg' property first DWORD.
12043 * The entry looks like the following:
12044 * First DWORD:
12045 * Bits 0 - 7 8-bit Register number
12046 * Bits 8 - 10 3-bit Function number
12047 * Bits 11 - 15 5-bit Device number
12048 * Bits 16 - 23 8-bit Bus number
12049 * Bits 24 - 25 2-bit Address Space type identifier
12050 *
12051 */
12052 adapter_data->PciInformation.u.bits.BusNumber =
12053 (reg_data[0] & 0x00FF0000) >> 16;
12054 adapter_data->PciInformation.u.bits.DeviceNumber =
12055 (reg_data[0] & 0x0000F800) >> 11;
12056 adapter_data->PciInformation.u.bits.FunctionNumber =
12057 (reg_data[0] & 0x00000700) >> 8;
12058 ddi_prop_free((void *)reg_data);
12059 } else {
12060 /*
12061 * If we can't determine the PCI data then we fill in FF's for
12062 * the data to indicate this.
12063 */
12064 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
12065 adapter_data->MpiPortNumber = 0xFFFFFFFF;
12066 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
12067 }
12068
12069 /*
12070 * Saved in the mpt->m_fwversion
12071 */
12072 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
12073 }
12074
12075 static void
12076 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12077 {
12078 char *driver_verstr = MPTSAS_MOD_STRING;
12079
12080 mptsas_lookup_pci_data(mpt, adapter_data);
12081 adapter_data->AdapterType = mpt->m_MPI25 ?
12082 MPTIOCTL_ADAPTER_TYPE_SAS3 :
12083 MPTIOCTL_ADAPTER_TYPE_SAS2;
12084 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
12085 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
12086 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
12087 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
12088 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
12089 adapter_data->BiosVersion = 0;
12090 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
12091 }
12092
12093 static void
12094 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
12095 {
12096 int *reg_data, i;
12097 uint_t reglen;
12098
12099 /*
12100 * Lookup the 'reg' property and extract the other data
12101 */
12102 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12103 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
12104 DDI_PROP_SUCCESS) {
12105 /*
12106 * Extract the PCI data from the 'reg' property first DWORD.
12107 * The entry looks like the following:
12108 * First DWORD:
12109 * Bits 8 - 10 3-bit Function number
12110 * Bits 11 - 15 5-bit Device number
12111 * Bits 16 - 23 8-bit Bus number
12112 */
12113 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
12114 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
12115 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
12116 ddi_prop_free((void *)reg_data);
12117 } else {
12118 /*
12119 * If we can't determine the PCI info then we fill in FF's for
12120 * the data to indicate this.
12121 */
12122 pci_info->BusNumber = 0xFFFFFFFF;
12123 pci_info->DeviceNumber = 0xFF;
12124 pci_info->FunctionNumber = 0xFF;
12125 }
12126
12127 /*
12128 * Now get the interrupt vector and the pci header. The vector can
12129 * only be 0 right now. The header is the first 256 bytes of config
12130 * space.
12131 */
12132 pci_info->InterruptVector = 0;
12133 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
12134 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
12135 i);
12136 }
12137 }
12138
12139 static int
12140 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
12141 {
12142 int status = 0;
12143 mptsas_reg_access_t driverdata;
12144
12145 mutex_enter(&mpt->m_mutex);
12146 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12147 switch (driverdata.Command) {
12148 /*
12149 * IO access is not supported.
12150 */
12151 case REG_IO_READ:
12152 case REG_IO_WRITE:
12153 mptsas_log(mpt, CE_WARN, "IO access is not "
12154 "supported. Use memory access.");
12155 status = EINVAL;
12156 break;
12157
12158 case REG_MEM_READ:
12159 driverdata.RegData = ddi_get32(mpt->m_datap,
12160 (uint32_t *)(void *)mpt->m_reg +
12161 driverdata.RegOffset);
12162 if (ddi_copyout(&driverdata.RegData,
12163 &data->RegData,
12164 sizeof (driverdata.RegData), mode) != 0) {
12165 mptsas_log(mpt, CE_WARN, "Register "
12166 "Read Failed");
12167 status = EFAULT;
12168 }
12169 break;
12170
12171 case REG_MEM_WRITE:
12172 ddi_put32(mpt->m_datap,
12173 (uint32_t *)(void *)mpt->m_reg +
12174 driverdata.RegOffset,
12175 driverdata.RegData);
12176 break;
12177
12178 default:
12179 status = EINVAL;
12180 break;
12181 }
12182 } else {
12183 status = EFAULT;
12184 }
12185
12186 mutex_exit(&mpt->m_mutex);
12187 return (status);
12188 }
12189
12190 static int
12191 led_control(mptsas_t *mpt, intptr_t data, int mode)
12192 {
12193 int ret = 0;
12194 mptsas_led_control_t lc;
12195 mptsas_target_t *ptgt;
12196
12197 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
12198 return (EFAULT);
12199 }
12200
12201 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
12202 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
12203 lc.Led < MPTSAS_LEDCTL_LED_MIN ||
12204 lc.Led > MPTSAS_LEDCTL_LED_MAX ||
12205 (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
12206 lc.LedStatus != 1)) {
12207 return (EINVAL);
12208 }
12209
12210 if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
12211 (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
12212 return (EACCES);
12213
12214 /* Locate the target we're interrogating... */
12215 mutex_enter(&mpt->m_mutex);
12216 ptgt = refhash_linear_search(mpt->m_targets,
12217 mptsas_target_eval_slot, &lc);
12218 if (ptgt == NULL) {
12219 /* We could not find a target for that enclosure/slot. */
12220 mutex_exit(&mpt->m_mutex);
12221 return (ENOENT);
12222 }
12223
12224 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
12225 /* Update our internal LED state. */
12226 ptgt->m_led_status &= ~(1 << (lc.Led - 1));
12227 ptgt->m_led_status |= lc.LedStatus << (lc.Led - 1);
12228
12229 /* Flush it to the controller. */
12230 ret = mptsas_flush_led_status(mpt, ptgt);
12231 mutex_exit(&mpt->m_mutex);
12232 return (ret);
12233 }
12234
12235 /* Return our internal LED state. */
12236 lc.LedStatus = (ptgt->m_led_status >> (lc.Led - 1)) & 1;
12237 mutex_exit(&mpt->m_mutex);
12238
12239 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12240 return (EFAULT);
12241 }
12242
12243 return (0);
12244 }
12245
/*
 * Handle the GET_DISK_INFO ioctl: report the number of known targets and,
 * if the caller supplied a large enough array, per-target info (instance,
 * enclosure, slot, SAS address).  Uses the STRUCT_* data-model macros so
 * 32-bit callers work against a 64-bit kernel.  Returns 0 or an errno.
 */
static int
get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
{
	uint16_t i = 0;
	uint16_t count = 0;
	int ret = 0;
	mptsas_target_t *ptgt;
	mptsas_disk_info_t *di;
	STRUCT_DECL(mptsas_get_disk_info, gdi);

	/* This ioctl only reads driver state; require read access. */
	if ((mode & FREAD) == 0)
		return (EACCES);

	STRUCT_INIT(gdi, get_udatamodel());

	if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
	    mode) != 0) {
		return (EFAULT);
	}

	/* Find out how many targets there are. */
	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		count++;
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * If we haven't been asked to copy out information on each target,
	 * then just return the count.
	 */
	STRUCT_FSET(gdi, DiskCount, count);
	if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
		goto copy_out;

	/*
	 * If we haven't been given a large enough buffer to copy out into,
	 * let the caller know.
	 */
	if (STRUCT_FGET(gdi, DiskInfoArraySize) <
	    count * sizeof (mptsas_disk_info_t)) {
		ret = ENOSPC;
		goto copy_out;
	}

	/* Allocation happens outside the lock; KM_SLEEP may block. */
	di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);

	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		if (i >= count) {
			/*
			 * The number of targets changed while we weren't
			 * looking, so give up.
			 * NOTE(review): refhash_rele here presumably drops
			 * the iteration hold refhash_next took on ptgt —
			 * confirm against the refhash implementation.
			 */
			refhash_rele(mpt->m_targets, ptgt);
			mutex_exit(&mpt->m_mutex);
			kmem_free(di, count * sizeof (mptsas_disk_info_t));
			return (EAGAIN);
		}
		di[i].Instance = mpt->m_instance;
		di[i].Enclosure = ptgt->m_enclosure;
		di[i].Slot = ptgt->m_slot_num;
		di[i].SasAddress = ptgt->m_addr.mta_wwn;
		i++;
	}
	mutex_exit(&mpt->m_mutex);
	/* Report how many entries were actually filled in (may be < count). */
	STRUCT_FSET(gdi, DiskCount, i);

	/* Copy out the disk information to the caller. */
	if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
	    i * sizeof (mptsas_disk_info_t), mode) != 0) {
		ret = EFAULT;
	}

	kmem_free(di, count * sizeof (mptsas_disk_info_t));

copy_out:
	if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
	    mode) != 0) {
		ret = EFAULT;
	}

	return (ret);
}
12332
/*
 * Driver ioctl(9E) entry point.  Requires sys_config privilege.  Resolves
 * the soft state either from the controller minor or, failing that, from
 * an iport child node; ensures the device is at full power (D0) when PM is
 * enabled; then dispatches the MPTIOCTL_* commands, falling through to
 * scsi_hba_ioctl() for anything unrecognized.  For iport nodes, device
 * online/offline devctl commands additionally drive the OK2RM LED.
 *
 * Returns 0 or an errno; *rval carries MPTIOCTL_STATUS_* detail for the
 * commands that use it.
 */
static int
mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
    int *rval)
{
	int status = 0;
	mptsas_t *mpt;
	mptsas_update_flash_t flashdata;
	mptsas_pass_thru_t passthru_data;
	mptsas_adapter_data_t adapter_data;
	mptsas_pci_info_t pci_info;
	int copylen;

	int iport_flag = 0;
	dev_info_t *dip = NULL;
	mptsas_phymask_t phymask = 0;
	struct devctl_iocdata *dcp = NULL;
	char *addr = NULL;
	mptsas_target_t *ptgt = NULL;

	*rval = MPTIOCTL_STATUS_GOOD;
	/* All of these ioctls are privileged operations. */
	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
		return (EPERM);
	}

	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
	if (mpt == NULL) {
		/*
		 * Called from iport node, get the states
		 */
		iport_flag = 1;
		dip = mptsas_get_dip_from_dev(dev, &phymask);
		if (dip == NULL) {
			return (ENXIO);
		}
		mpt = DIP2MPT(dip);
	}
	/* Make sure power level is D0 before accessing registers */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(mpt->m_dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			/* Drop the mutex: pm_raise_power may block. */
			mutex_exit(&mpt->m_mutex);
			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: mptsas_ioctl: Raise power "
				    "request failed.", mpt->m_instance);
				(void) pm_idle_component(mpt->m_dip, 0);
				return (ENXIO);
			}
		} else {
			mutex_exit(&mpt->m_mutex);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}
	/*
	 * NOTE(review): on the success paths below, no matching
	 * pm_idle_component appears before "out" — presumably the component
	 * is idled elsewhere; confirm against the rest of the driver.
	 */

	if (iport_flag) {
		/* iport nodes get the generic HBA ioctl handling... */
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
		if (status != 0) {
			goto out;
		}
		/*
		 * The following code control the OK2RM LED, it doesn't affect
		 * the ioctl return status.
		 */
		if ((cmd == DEVCTL_DEVICE_ONLINE) ||
		    (cmd == DEVCTL_DEVICE_OFFLINE)) {
			if (ndi_dc_allochdl((void *)data, &dcp) !=
			    NDI_SUCCESS) {
				goto out;
			}
			addr = ndi_dc_getaddr(dcp);
			ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
			if (ptgt == NULL) {
				NDBG14(("mptsas_ioctl led control: tgt %s not "
				    "found", addr));
				ndi_dc_freehdl(dcp);
				goto out;
			}
			mutex_enter(&mpt->m_mutex);
			if (cmd == DEVCTL_DEVICE_ONLINE) {
				ptgt->m_tgt_unconfigured = 0;
			} else if (cmd == DEVCTL_DEVICE_OFFLINE) {
				ptgt->m_tgt_unconfigured = 1;
			}
			/* OK2RM LED on when offlined, off when onlined. */
			if (cmd == DEVCTL_DEVICE_OFFLINE) {
				ptgt->m_led_status |=
				    (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
			} else {
				ptgt->m_led_status &=
				    ~(1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
			}
			(void) mptsas_flush_led_status(mpt, ptgt);
			mutex_exit(&mpt->m_mutex);
			ndi_dc_freehdl(dcp);
		}
		goto out;
	}
	switch (cmd) {
		case MPTIOCTL_GET_DISK_INFO:
			status = get_disk_info(mpt, data, mode);
			break;
		case MPTIOCTL_LED_CONTROL:
			status = led_control(mpt, data, mode);
			break;
		case MPTIOCTL_UPDATE_FLASH:
			if (ddi_copyin((void *)data, &flashdata,
				sizeof (struct mptsas_update_flash), mode)) {
				status = EFAULT;
				break;
			}

			mutex_enter(&mpt->m_mutex);
			if (mptsas_update_flash(mpt,
			    (caddr_t)(long)flashdata.PtrBuffer,
			    flashdata.ImageSize, flashdata.ImageType, mode)) {
				status = EFAULT;
			}

			/*
			 * Reset the chip to start using the new
			 * firmware.  Reset if failed also.
			 */
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				status = EFAULT;
			}
			mutex_exit(&mpt->m_mutex);
			break;
		case MPTIOCTL_PASS_THRU:
			/*
			 * The user has requested to pass through a command to
			 * be executed by the MPT firmware.  Call our routine
			 * which does this.  Only allow one passthru IOCTL at
			 * one time. Other threads will block on
			 * m_passthru_mutex, which is of adaptive variant.
			 */
			if (ddi_copyin((void *)data, &passthru_data,
			    sizeof (mptsas_pass_thru_t), mode)) {
				status = EFAULT;
				break;
			}
			mutex_enter(&mpt->m_passthru_mutex);
			mutex_enter(&mpt->m_mutex);
			status = mptsas_pass_thru(mpt, &passthru_data, mode);
			mutex_exit(&mpt->m_mutex);
			mutex_exit(&mpt->m_passthru_mutex);

			break;
		case MPTIOCTL_GET_ADAPTER_DATA:
			/*
			 * The user has requested to read adapter data.  Call
			 * our routine which does this.
			 */
			bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
			if (ddi_copyin((void *)data, (void *)&adapter_data,
			    sizeof (mptsas_adapter_data_t), mode)) {
				status = EFAULT;
				break;
			}
			if (adapter_data.StructureLength >=
			    sizeof (mptsas_adapter_data_t)) {
				adapter_data.StructureLength = (uint32_t)
				    sizeof (mptsas_adapter_data_t);
				copylen = sizeof (mptsas_adapter_data_t);
				mutex_enter(&mpt->m_mutex);
				mptsas_read_adapter_data(mpt, &adapter_data);
				mutex_exit(&mpt->m_mutex);
			} else {
				/*
				 * Caller's buffer too small: copy back only
				 * the required StructureLength.
				 */
				adapter_data.StructureLength = (uint32_t)
				    sizeof (mptsas_adapter_data_t);
				copylen = sizeof (adapter_data.StructureLength);
				*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
			}
			if (ddi_copyout((void *)(&adapter_data), (void *)data,
			    copylen, mode) != 0) {
				status = EFAULT;
			}
			break;
		case MPTIOCTL_GET_PCI_INFO:
			/*
			 * The user has requested to read pci info.  Call
			 * our routine which does this.
			 */
			bzero(&pci_info, sizeof (mptsas_pci_info_t));
			mutex_enter(&mpt->m_mutex);
			mptsas_read_pci_info(mpt, &pci_info);
			mutex_exit(&mpt->m_mutex);
			if (ddi_copyout((void *)(&pci_info), (void *)data,
			    sizeof (mptsas_pci_info_t), mode) != 0) {
				status = EFAULT;
			}
			break;
		case MPTIOCTL_RESET_ADAPTER:
			mutex_enter(&mpt->m_mutex);
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
				    "failed");
				status = EFAULT;
			}
			mutex_exit(&mpt->m_mutex);
			break;
		case MPTIOCTL_DIAG_ACTION:
			/*
			 * The user has done a diag buffer action.  Call our
			 * routine which does this.  Only allow one diag action
			 * at one time.
			 * NOTE(review): this EBUSY return bypasses the common
			 * "out" exit — presumably equivalent since "out" only
			 * returns status; confirm no cleanup is skipped.
			 */
			mutex_enter(&mpt->m_mutex);
			if (mpt->m_diag_action_in_progress) {
				mutex_exit(&mpt->m_mutex);
				return (EBUSY);
			}
			mpt->m_diag_action_in_progress = 1;
			status = mptsas_diag_action(mpt,
			    (mptsas_diag_action_t *)data, mode);
			mpt->m_diag_action_in_progress = 0;
			mutex_exit(&mpt->m_mutex);
			break;
		case MPTIOCTL_EVENT_QUERY:
			/*
			 * The user has done an event query. Call our routine
			 * which does this.
			 */
			status = mptsas_event_query(mpt,
			    (mptsas_event_query_t *)data, mode, rval);
			break;
		case MPTIOCTL_EVENT_ENABLE:
			/*
			 * The user has done an event enable. Call our routine
			 * which does this.
			 */
			status = mptsas_event_enable(mpt,
			    (mptsas_event_enable_t *)data, mode, rval);
			break;
		case MPTIOCTL_EVENT_REPORT:
			/*
			 * The user has done an event report. Call our routine
			 * which does this.
			 */
			status = mptsas_event_report(mpt,
			    (mptsas_event_report_t *)data, mode, rval);
			break;
		case MPTIOCTL_REG_ACCESS:
			/*
			 * The user has requested register access.  Call our
			 * routine which does this.
			 */
			status = mptsas_reg_access(mpt,
			    (mptsas_reg_access_t *)data, mode);
			break;
		default:
			/* Unknown commands fall through to the framework. */
			status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
			    rval);
			break;
	}

out:
	return (status);
}
12595
/*
 * Restart the IOC: quiesce and flush all outstanding I/O, re-initialize
 * the chip, and resume.  Called with m_mutex held (e.g. after a firmware
 * update or the RESET_ADAPTER ioctl).  Returns DDI_SUCCESS or DDI_FAILURE;
 * on failure an FM ereport is posted and the service is marked lost.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int rval = DDI_SUCCESS;
	mptsas_target_t *ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.  (first_time == FALSE: this is a restart,
	 * not the initial attach-time bring-up.)
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/* Drain completions and restart queued I/O regardless of rval. */
	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
12674
/*
 * Bring the IOC to the operational state.  Called both at attach time
 * (first_time == TRUE) and again after chip resets.  When a message-unit
 * reset (MUR) succeeded on a non-first pass, the IOC configuration is
 * preserved, so the existing DMA resources are reused and setup jumps
 * straight to the IOC init handshake ("mur" label).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t cookie;
	uint32_t i;
	int rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	/*
	 * After a successful MUR the previously allocated buffers are still
	 * valid, so skip re-allocation — but never on the first pass, when
	 * nothing has been allocated yet.
	 */
	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
		goto fail;
	}
	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 * All-ones marks a post-queue descriptor as unused.
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		goto fail;
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	/* Check all DMA handles touched during initialization. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
12852
12853 static int
12854 mptsas_get_pci_cap(mptsas_t *mpt)
12855 {
12856 ushort_t caps_ptr, cap, cap_count;
12857
12858 if (mpt->m_config_handle == NULL)
12859 return (FALSE);
12860 /*
12861 * Check if capabilities list is supported and if so,
12862 * get initial capabilities pointer and clear bits 0,1.
12863 */
12864 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
12865 & PCI_STAT_CAP) {
12866 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12867 PCI_CONF_CAP_PTR), 4);
12868 } else {
12869 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
12870 }
12871
12872 /*
12873 * Walk capabilities if supported.
12874 */
12875 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
12876
12877 /*
12878 * Check that we haven't exceeded the maximum number of
12879 * capabilities and that the pointer is in a valid range.
12880 */
12881 if (++cap_count > 48) {
12882 mptsas_log(mpt, CE_WARN,
12883 "too many device capabilities.\n");
12884 break;
12885 }
12886 if (caps_ptr < 64) {
12887 mptsas_log(mpt, CE_WARN,
12888 "capabilities pointer 0x%x out of range.\n",
12889 caps_ptr);
12890 break;
12891 }
12892
12893 /*
12894 * Get next capability and check that it is valid.
12895 * For now, we only support power management.
12896 */
12897 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
12898 switch (cap) {
12899 case PCI_CAP_ID_PM:
12900 mptsas_log(mpt, CE_NOTE,
12901 "?mptsas%d supports power management.\n",
12902 mpt->m_instance);
12903 mpt->m_options |= MPTSAS_OPT_PM;
12904
12905 /* Save PMCSR offset */
12906 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
12907 break;
12908 /*
12909 * The following capabilities are valid. Any others
12910 * will cause a message to be logged.
12911 */
12912 case PCI_CAP_ID_VPD:
12913 case PCI_CAP_ID_MSI:
12914 case PCI_CAP_ID_PCIX:
12915 case PCI_CAP_ID_PCI_E:
12916 case PCI_CAP_ID_MSI_X:
12917 break;
12918 default:
12919 mptsas_log(mpt, CE_NOTE,
12920 "?mptsas%d unrecognized capability "
12921 "0x%x.\n", mpt->m_instance, cap);
12922 break;
12923 }
12924
12925 /*
12926 * Get next capabilities pointer and clear bits 0,1.
12927 */
12928 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12929 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
12930 }
12931 return (TRUE);
12932 }
12933
/*
 * Set up power management for the instance: register the pm-components
 * property and bring the device to full power (D0).
 *
 * Returns DDI_SUCCESS when PM is unsupported by the hardware (nothing to
 * do) or when setup completed; DDI_FAILURE on any setup error.
 */
static int
mptsas_init_pm(mptsas_t *mpt)
{
	char pmc_name[16];
	char *pmc[] = {
		NULL,
		"0=Off (PCI D3 State)",
		"3=On (PCI D0 State)",
		NULL
	};
	uint16_t pmcsr_stat;

	/* Walks the PCI caps list; sets MPTSAS_OPT_PM and m_pmcsr_offset. */
	if (mptsas_get_pci_cap(mpt) == FALSE) {
		return (DDI_FAILURE);
	}
	/*
	 * If PCI's capability does not support PM, then don't need
	 * to register the pm-components
	 */
	if (!(mpt->m_options & MPTSAS_OPT_PM))
		return (DDI_SUCCESS);
	/*
	 * If power management is supported by this chip, create
	 * pm-components property for the power management framework
	 */
	(void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
	pmc[0] = pmc_name;
	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
	    "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
		mpt->m_options &= ~MPTSAS_OPT_PM;
		mptsas_log(mpt, CE_WARN,
		    "mptsas%d: pm-component property creation failed.",
		    mpt->m_instance);
		return (DDI_FAILURE);
	}

	/*
	 * Power on device.
	 */
	(void) pm_busy_component(mpt->m_dip, 0);
	pmcsr_stat = pci_config_get16(mpt->m_config_handle,
	    mpt->m_pmcsr_offset);
	if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
		/* Device is not currently in D0; force it there. */
		mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
		    mpt->m_instance);
		pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
		    PCI_PMCSR_D0);
	}
	/* Tell the PM framework the component is now at full power. */
	if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
		return (DDI_FAILURE);
	}
	mpt->m_power_level = PM_LEVEL_D0;
	/*
	 * Set pm idle delay (tunable via the mptsas-pm-idle-delay property).
	 */
	mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
	    mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);

	return (DDI_SUCCESS);
}
12995
12996 static int
12997 mptsas_register_intrs(mptsas_t *mpt)
12998 {
12999 dev_info_t *dip;
13000 int intr_types;
13001
13002 dip = mpt->m_dip;
13003
13004 /* Get supported interrupt types */
13005 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
13006 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
13007 "failed\n");
13008 return (FALSE);
13009 }
13010
13011 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
13012
13013 /*
13014 * Try MSI, but fall back to FIXED
13015 */
13016 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
13017 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
13018 NDBG0(("Using MSI interrupt type"));
13019 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
13020 return (TRUE);
13021 }
13022 }
13023 if (intr_types & DDI_INTR_TYPE_FIXED) {
13024 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
13025 NDBG0(("Using FIXED interrupt type"));
13026 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
13027 return (TRUE);
13028 } else {
13029 NDBG0(("FIXED interrupt registration failed"));
13030 return (FALSE);
13031 }
13032 }
13033
13034 return (FALSE);
13035 }
13036
/*
 * Tear down whatever interrupts mptsas_register_intrs() set up.
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
13042
13043 /*
13044 * mptsas_add_intrs:
13045 *
13046 * Register FIXED or MSI interrupts.
13047 */
13048 static int
13049 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
13050 {
13051 dev_info_t *dip = mpt->m_dip;
13052 int avail, actual, count = 0;
13053 int i, flag, ret;
13054
13055 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
13056
13057 /* Get number of interrupts */
13058 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
13059 if ((ret != DDI_SUCCESS) || (count <= 0)) {
13060 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
13061 "ret %d count %d\n", ret, count);
13062
13063 return (DDI_FAILURE);
13064 }
13065
13066 /* Get number of available interrupts */
13067 ret = ddi_intr_get_navail(dip, intr_type, &avail);
13068 if ((ret != DDI_SUCCESS) || (avail == 0)) {
13069 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
13070 "ret %d avail %d\n", ret, avail);
13071
13072 return (DDI_FAILURE);
13073 }
13074
13075 if (avail < count) {
13076 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
13077 "navail() returned %d", count, avail);
13078 }
13079
13080 /* Mpt only have one interrupt routine */
13081 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
13082 count = 1;
13083 }
13084
13085 /* Allocate an array of interrupt handles */
13086 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
13087 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
13088
13089 flag = DDI_INTR_ALLOC_NORMAL;
13090
13091 /* call ddi_intr_alloc() */
13092 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
13093 count, &actual, flag);
13094
13095 if ((ret != DDI_SUCCESS) || (actual == 0)) {
13096 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
13097 ret);
13098 kmem_free(mpt->m_htable, mpt->m_intr_size);
13099 return (DDI_FAILURE);
13100 }
13101
13102 /* use interrupt count returned or abort? */
13103 if (actual < count) {
13104 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
13105 count, actual);
13106 }
13107
13108 mpt->m_intr_cnt = actual;
13109
13110 /*
13111 * Get priority for first msi, assume remaining are all the same
13112 */
13113 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
13114 &mpt->m_intr_pri)) != DDI_SUCCESS) {
13115 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
13116
13117 /* Free already allocated intr */
13118 for (i = 0; i < actual; i++) {
13119 (void) ddi_intr_free(mpt->m_htable[i]);
13120 }
13121
13122 kmem_free(mpt->m_htable, mpt->m_intr_size);
13123 return (DDI_FAILURE);
13124 }
13125
13126 /* Test for high level mutex */
13127 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
13128 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
13129 "Hi level interrupt not supported\n");
13130
13131 /* Free already allocated intr */
13132 for (i = 0; i < actual; i++) {
13133 (void) ddi_intr_free(mpt->m_htable[i]);
13134 }
13135
13136 kmem_free(mpt->m_htable, mpt->m_intr_size);
13137 return (DDI_FAILURE);
13138 }
13139
13140 /* Call ddi_intr_add_handler() */
13141 for (i = 0; i < actual; i++) {
13142 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
13143 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
13144 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
13145 "failed %d\n", ret);
13146
13147 /* Free already allocated intr */
13148 for (i = 0; i < actual; i++) {
13149 (void) ddi_intr_free(mpt->m_htable[i]);
13150 }
13151
13152 kmem_free(mpt->m_htable, mpt->m_intr_size);
13153 return (DDI_FAILURE);
13154 }
13155 }
13156
13157 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
13158 != DDI_SUCCESS) {
13159 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
13160
13161 /* Free already allocated intr */
13162 for (i = 0; i < actual; i++) {
13163 (void) ddi_intr_free(mpt->m_htable[i]);
13164 }
13165
13166 kmem_free(mpt->m_htable, mpt->m_intr_size);
13167 return (DDI_FAILURE);
13168 }
13169
13170 /*
13171 * Enable interrupts
13172 */
13173 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13174 /* Call ddi_intr_block_enable() for MSI interrupts */
13175 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
13176 } else {
13177 /* Call ddi_intr_enable for MSI or FIXED interrupts */
13178 for (i = 0; i < mpt->m_intr_cnt; i++) {
13179 (void) ddi_intr_enable(mpt->m_htable[i]);
13180 }
13181 }
13182 return (DDI_SUCCESS);
13183 }
13184
13185 /*
13186 * mptsas_rem_intrs:
13187 *
13188 * Unregister FIXED or MSI interrupts
13189 */
13190 static void
13191 mptsas_rem_intrs(mptsas_t *mpt)
13192 {
13193 int i;
13194
13195 NDBG6(("mptsas_rem_intrs"));
13196
13197 /* Disable all interrupts */
13198 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13199 /* Call ddi_intr_block_disable() */
13200 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
13201 } else {
13202 for (i = 0; i < mpt->m_intr_cnt; i++) {
13203 (void) ddi_intr_disable(mpt->m_htable[i]);
13204 }
13205 }
13206
13207 /* Call ddi_intr_remove_handler() */
13208 for (i = 0; i < mpt->m_intr_cnt; i++) {
13209 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
13210 (void) ddi_intr_free(mpt->m_htable[i]);
13211 }
13212
13213 kmem_free(mpt->m_htable, mpt->m_intr_size);
13214 }
13215
13216 /*
13217 * The IO fault service error handling callback function
13218 */
13219 /*ARGSUSED*/
13220 static int
13221 mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
13222 {
13223 /*
13224 * as the driver can always deal with an error in any dma or
13225 * access handle, we can just return the fme_status value.
13226 */
13227 pci_ereport_post(dip, err, NULL);
13228 return (err->fme_status);
13229 }
13230
13231 /*
13232 * mptsas_fm_init - initialize fma capabilities and register with IO
13233 * fault services.
13234 */
13235 static void
13236 mptsas_fm_init(mptsas_t *mpt)
13237 {
13238 /*
13239 * Need to change iblock to priority for new MSI intr
13240 */
13241 ddi_iblock_cookie_t fm_ibc;
13242
13243 /* Only register with IO Fault Services if we have some capability */
13244 if (mpt->m_fm_capabilities) {
13245 /* Adjust access and dma attributes for FMA */
13246 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
13247 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13248 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13249
13250 /*
13251 * Register capabilities with IO Fault Services.
13252 * mpt->m_fm_capabilities will be updated to indicate
13253 * capabilities actually supported (not requested.)
13254 */
13255 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
13256
13257 /*
13258 * Initialize pci ereport capabilities if ereport
13259 * capable (should always be.)
13260 */
13261 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13262 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13263 pci_ereport_setup(mpt->m_dip);
13264 }
13265
13266 /*
13267 * Register error callback if error callback capable.
13268 */
13269 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13270 ddi_fm_handler_register(mpt->m_dip,
13271 mptsas_fm_error_cb, (void *) mpt);
13272 }
13273 }
13274 }
13275
13276 /*
13277 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
13278 * fault services.
13279 *
13280 */
13281 static void
13282 mptsas_fm_fini(mptsas_t *mpt)
13283 {
13284 /* Only unregister FMA capabilities if registered */
13285 if (mpt->m_fm_capabilities) {
13286
13287 /*
13288 * Un-register error callback if error callback capable.
13289 */
13290
13291 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13292 ddi_fm_handler_unregister(mpt->m_dip);
13293 }
13294
13295 /*
13296 * Release any resources allocated by pci_ereport_setup()
13297 */
13298
13299 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13300 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13301 pci_ereport_teardown(mpt->m_dip);
13302 }
13303
13304 /* Unregister from IO Fault Services */
13305 ddi_fm_fini(mpt->m_dip);
13306
13307 /* Adjust access and dma attributes for FMA */
13308 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
13309 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13310 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13311
13312 }
13313 }
13314
/*
 * Return the FMA status (e.g. DDI_FM_OK) of the given access handle,
 * or DDI_FAILURE when the handle was never set up.
 */
int
mptsas_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL)
		return (DDI_FAILURE);
	ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
	return (de.fme_status);
}
13325
/*
 * Return the FMA status (e.g. DDI_FM_OK) of the given DMA handle,
 * or DDI_FAILURE when the handle was never set up.
 */
int
mptsas_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	if (handle == NULL)
		return (DDI_FAILURE);
	ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
	return (de.fme_status);
}
13336
/*
 * Post a device ereport of class "DDI_FM_DEVICE.<detail>" for this
 * instance, if it is ereport-capable; otherwise do nothing.
 */
void
mptsas_fm_ereport(mptsas_t *mpt, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
		ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}
13350
13351 static int
13352 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13353 uint16_t *dev_handle, mptsas_target_t **pptgt)
13354 {
13355 int rval;
13356 uint32_t dev_info;
13357 uint64_t sas_wwn;
13358 mptsas_phymask_t phymask;
13359 uint8_t physport, phynum, config, disk;
13360 uint64_t devicename;
13361 uint16_t pdev_hdl;
13362 mptsas_target_t *tmp_tgt = NULL;
13363 uint16_t bay_num, enclosure, io_flags;
13364
13365 ASSERT(*pptgt == NULL);
13366
13367 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13368 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13369 &bay_num, &enclosure, &io_flags);
13370 if (rval != DDI_SUCCESS) {
13371 rval = DEV_INFO_FAIL_PAGE0;
13372 return (rval);
13373 }
13374
13375 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
13376 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13377 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
13378 rval = DEV_INFO_WRONG_DEVICE_TYPE;
13379 return (rval);
13380 }
13381
13382 /*
13383 * Check if the dev handle is for a Phys Disk. If so, set return value
13384 * and exit. Don't add Phys Disks to hash.
13385 */
13386 for (config = 0; config < mpt->m_num_raid_configs; config++) {
13387 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
13388 if (*dev_handle == mpt->m_raidconfig[config].
13389 m_physdisk_devhdl[disk]) {
13390 rval = DEV_INFO_PHYS_DISK;
13391 return (rval);
13392 }
13393 }
13394 }
13395
13396 /*
13397 * Get SATA Device Name from SAS device page0 for
13398 * sata device, if device name doesn't exist, set mta_wwn to
13399 * 0 for direct attached SATA. For the device behind the expander
13400 * we still can use STP address assigned by expander.
13401 */
13402 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13403 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13404 mutex_exit(&mpt->m_mutex);
13405 /* alloc a tmp_tgt to send the cmd */
13406 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
13407 KM_SLEEP);
13408 tmp_tgt->m_devhdl = *dev_handle;
13409 tmp_tgt->m_deviceinfo = dev_info;
13410 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
13411 tmp_tgt->m_qfull_retry_interval =
13412 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
13413 tmp_tgt->m_t_throttle = MAX_THROTTLE;
13414 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13415 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
13416 mutex_enter(&mpt->m_mutex);
13417 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13418 sas_wwn = devicename;
13419 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13420 sas_wwn = 0;
13421 }
13422 }
13423
13424 phymask = mptsas_physport_to_phymask(mpt, physport);
13425 *pptgt = mptsas_tgt_alloc(mpt, *dev_handle, sas_wwn,
13426 dev_info, phymask, phynum);
13427 if (*pptgt == NULL) {
13428 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
13429 "structure!");
13430 rval = DEV_INFO_FAIL_ALLOC;
13431 return (rval);
13432 }
13433 (*pptgt)->m_io_flags = io_flags;
13434 (*pptgt)->m_enclosure = enclosure;
13435 (*pptgt)->m_slot_num = bay_num;
13436 return (DEV_INFO_SUCCESS);
13437 }
13438
/*
 * Fetch the NAA GUID of a SATA device by issuing an INQUIRY for VPD
 * page 0x83 (device identification) and decoding the first designator
 * per SAT-2.  Retries up to 3 times (1 s apart) while the drive has not
 * yet produced its identification data.  Returns the GUID, or 0 when
 * none could be obtained.
 */
uint64_t
mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
{
	uint64_t sata_guid = 0, *pwwn = NULL;
	int target = ptgt->m_devhdl;
	uchar_t *inq83 = NULL;
	int inq83_len = 0xFF;
	uchar_t *dblk = NULL;
	int inq83_retry = 3;
	int rval = DDI_FAILURE;

	inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

inq83_retry:
	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
	    inq83_len, NULL, 1);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
		    "0x83 for target:%x, lun:%x failed!", target, lun);
		goto out;
	}
	/* According to SAT2, the first descriptor is logic unit name */
	dblk = &inq83[4];
	/* ASSOCIATION field (bits 4-5 of byte 1) must be 0 = LU. */
	if ((dblk[1] & 0x30) != 0) {
		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
		goto out;
	}
	pwwn = (uint64_t *)(void *)(&dblk[4]);
	/* 0x5n in the first identifier byte marks an NAA-5 WWN. */
	if ((dblk[4] & 0xf0) == 0x50) {
		sata_guid = BE_64(*pwwn);
		goto out;
	} else if (dblk[4] == 'A') {
		/* ASCII identifier — the drive has no NAA-format GUID. */
		NDBG20(("SATA drive has no NAA format GUID."));
		goto out;
	} else {
		/* The data is not ready, wait and retry */
		inq83_retry--;
		if (inq83_retry <= 0) {
			goto out;
		}
		NDBG20(("The GUID is not ready, retry..."));
		delay(1 * drv_usectohz(1000000));
		goto inq83_retry;
	}
out:
	kmem_free(inq83, inq83_len);
	return (sata_guid);
}
13487
/*
 * Issue a SCSI INQUIRY (optionally a VPD page when evpd is set) to the
 * given target/lun and copy up to len bytes of the response into buf.
 * When reallen is non-NULL it receives the number of bytes actually
 * transferred.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
    unsigned char *buf, int len, int *reallen, uchar_t evpd)
{
	uchar_t cdb[CDB_GROUP0];
	struct scsi_address ap;
	struct buf *data_bp = NULL;
	int resid = 0;
	int ret = DDI_FAILURE;

	/* Allocation length is a 16-bit field in the CDB. */
	ASSERT(len <= 0xffff);

	/* Build a throwaway scsi_address; the real target goes via ptgt. */
	ap.a_target = MPTSAS_INVALID_DEVHDL;
	ap.a_lun = (uchar_t)(lun);
	ap.a_hba_tran = mpt->m_tran;

	data_bp = scsi_alloc_consistent_buf(&ap,
	    (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
	if (data_bp == NULL) {
		return (ret);
	}
	bzero(cdb, CDB_GROUP0);
	cdb[0] = SCMD_INQUIRY;
	cdb[1] = evpd;
	cdb[2] = page;
	cdb[3] = (len & 0xff00) >> 8;
	cdb[4] = (len & 0x00ff);
	cdb[5] = 0;

	ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
	    &resid);
	if (ret == DDI_SUCCESS) {
		if (reallen) {
			*reallen = len - resid;
		}
		bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
	}
	if (data_bp) {
		scsi_free_consistent_buf(data_bp);
	}
	return (ret);
}
13530
/*
 * Build and poll a SCSI command to the given target, bypassing the
 * normal target driver path.  A clone of the HBA tran with a private
 * mptsas_tgt_private_t is temporarily installed in ap->a_hba_tran so
 * that scsi_init_pkt() resolves the target the same way commands from
 * sd would.  On success, *resid (if non-NULL) receives the residual
 * byte count.  Returns DDI_SUCCESS or DDI_FAILURE (including when the
 * device reports CHECK CONDITION).
 */
static int
mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
    mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
    int *resid)
{
	struct scsi_pkt *pktp = NULL;
	scsi_hba_tran_t *tran_clone = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	int ret = DDI_FAILURE;

	/*
	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
	 * to simulate the cmds from sd
	 */
	tran_clone = kmem_alloc(
	    sizeof (scsi_hba_tran_t), KM_SLEEP);
	if (tran_clone == NULL) {
		goto out;
	}
	bcopy((caddr_t)mpt->m_tran,
	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
	tgt_private = kmem_alloc(
	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
	if (tgt_private == NULL) {
		goto out;
	}
	tgt_private->t_lun = ap->a_lun;
	tgt_private->t_private = ptgt;
	tran_clone->tran_tgt_private = tgt_private;
	ap->a_hba_tran = tran_clone;

	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
	    data_bp, cdblen, sizeof (struct scsi_arq_status),
	    0, PKT_CONSISTENT, NULL, NULL);
	if (pktp == NULL) {
		goto out;
	}
	bcopy(cdb, pktp->pkt_cdbp, cdblen);
	pktp->pkt_flags = FLAG_NOPARITY;
	/* Issue the command synchronously. */
	if (scsi_poll(pktp) < 0) {
		goto out;
	}
	/* A CHECK CONDITION status is treated as failure. */
	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
		goto out;
	}
	if (resid != NULL) {
		*resid = pktp->pkt_resid;
	}

	ret = DDI_SUCCESS;
out:
	/* Common cleanup for all exit paths. */
	if (pktp) {
		scsi_destroy_pkt(pktp);
	}
	if (tran_clone) {
		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
	}
	if (tgt_private) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
	}
	return (ret);
}
13594 static int
13595 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
13596 {
13597 char *cp = NULL;
13598 char *ptr = NULL;
13599 size_t s = 0;
13600 char *wwid_str = NULL;
13601 char *lun_str = NULL;
13602 long lunnum;
13603 long phyid = -1;
13604 int rc = DDI_FAILURE;
13605
13606 ptr = name;
13607 ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
13608 ptr++;
13609 if ((cp = strchr(ptr, ',')) == NULL) {
13610 return (DDI_FAILURE);
13611 }
13612
13613 wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13614 s = (uintptr_t)cp - (uintptr_t)ptr;
13615
13616 bcopy(ptr, wwid_str, s);
13617 wwid_str[s] = '\0';
13618
13619 ptr = ++cp;
13620
13621 if ((cp = strchr(ptr, '\0')) == NULL) {
13622 goto out;
13623 }
13624 lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13625 s = (uintptr_t)cp - (uintptr_t)ptr;
13626
13627 bcopy(ptr, lun_str, s);
13628 lun_str[s] = '\0';
13629
13630 if (name[0] == 'p') {
13631 rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
13632 } else {
13633 rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
13634 }
13635 if (rc != DDI_SUCCESS)
13636 goto out;
13637
13638 if (phyid != -1) {
13639 ASSERT(phyid < MPTSAS_MAX_PHYS);
13640 *phy = (uint8_t)phyid;
13641 }
13642 rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
13643 if (rc != 0)
13644 goto out;
13645
13646 *lun = (int)lunnum;
13647 rc = DDI_SUCCESS;
13648 out:
13649 if (wwid_str)
13650 kmem_free(wwid_str, SCSI_MAXNAMELEN);
13651 if (lun_str)
13652 kmem_free(lun_str, SCSI_MAXNAMELEN);
13653
13654 return (rc);
13655 }
13656
13657 /*
13658 * mptsas_parse_smp_name() is to parse sas wwn string
13659 * which format is "wWWN"
13660 */
13661 static int
13662 mptsas_parse_smp_name(char *name, uint64_t *wwn)
13663 {
13664 char *ptr = name;
13665
13666 if (*ptr != 'w') {
13667 return (DDI_FAILURE);
13668 }
13669
13670 ptr++;
13671 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
13672 return (DDI_FAILURE);
13673 }
13674 return (DDI_SUCCESS);
13675 }
13676
/*
 * Nexus bus_config entry point for the iport.  Handles BUS_CONFIG_ONE
 * (configure a single child named "smp@w...", "...@w<WWN>,<LUN>" or
 * "...@p<PHY>,<LUN>") as well as BUS_CONFIG_DRIVER/BUS_CONFIG_ALL
 * (enumerate everything).  Returns an NDI_* status.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int ret = NDI_FAILURE;
	int circ = 0;
	int circ1 = 0;
	mptsas_t *mpt;
	char *ptr = NULL;
	char *devnm = NULL;
	uint64_t wwid = 0;
	uint8_t phy = 0xFF;
	int lun = 0;
	uint_t mflags = flag;
	int bconfig = TRUE;

	/* Only iport nodes (which carry a unit address) are configurable. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				/* Append ",0" and re-point ptr past '@'. */
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	/* Let the framework attach the node(s) unless explicitly skipped. */
	if ((ret == NDI_SUCCESS) && bconfig) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
13794
13795 static int
13796 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
13797 mptsas_target_t *ptgt)
13798 {
13799 int rval = DDI_FAILURE;
13800 struct scsi_inquiry *sd_inq = NULL;
13801 mptsas_t *mpt = DIP2MPT(pdip);
13802
13803 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13804
13805 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
13806 SUN_INQSIZE, 0, (uchar_t)0);
13807
13808 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13809 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
13810 } else {
13811 rval = DDI_FAILURE;
13812 }
13813
13814 kmem_free(sd_inq, SUN_INQSIZE);
13815 return (rval);
13816 }
13817
13818 static int
13819 mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
13820 dev_info_t **lundip)
13821 {
13822 int rval;
13823 mptsas_t *mpt = DIP2MPT(pdip);
13824 int phymask;
13825 mptsas_target_t *ptgt = NULL;
13826
13827 /*
13828 * Get the physical port associated to the iport
13829 */
13830 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13831 "phymask", 0);
13832
13833 ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
13834 if (ptgt == NULL) {
13835 /*
13836 * didn't match any device by searching
13837 */
13838 return (DDI_FAILURE);
13839 }
13840 /*
13841 * If the LUN already exists and the status is online,
13842 * we just return the pointer to dev_info_t directly.
13843 * For the mdi_pathinfo node, we'll handle it in
13844 * mptsas_create_virt_lun()
13845 * TODO should be also in mptsas_handle_dr
13846 */
13847
13848 *lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
13849 if (*lundip != NULL) {
13850 /*
13851 * TODO Another senario is, we hotplug the same disk
13852 * on the same slot, the devhdl changed, is this
13853 * possible?
13854 * tgt_private->t_private != ptgt
13855 */
13856 if (sasaddr != ptgt->m_addr.mta_wwn) {
13857 /*
13858 * The device has changed although the devhdl is the
13859 * same (Enclosure mapping mode, change drive on the
13860 * same slot)
13861 */
13862 return (DDI_FAILURE);
13863 }
13864 return (DDI_SUCCESS);
13865 }
13866
13867 if (phymask == 0) {
13868 /*
13869 * Configure IR volume
13870 */
13871 rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
13872 return (rval);
13873 }
13874 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13875
13876 return (rval);
13877 }
13878
13879 static int
13880 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
13881 dev_info_t **lundip)
13882 {
13883 int rval;
13884 mptsas_t *mpt = DIP2MPT(pdip);
13885 mptsas_phymask_t phymask;
13886 mptsas_target_t *ptgt = NULL;
13887
13888 /*
13889 * Get the physical port associated to the iport
13890 */
13891 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13892 "phymask", 0);
13893
13894 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
13895 if (ptgt == NULL) {
13896 /*
13897 * didn't match any device by searching
13898 */
13899 return (DDI_FAILURE);
13900 }
13901
13902 /*
13903 * If the LUN already exists and the status is online,
13904 * we just return the pointer to dev_info_t directly.
13905 * For the mdi_pathinfo node, we'll handle it in
13906 * mptsas_create_virt_lun().
13907 */
13908
13909 *lundip = mptsas_find_child_phy(pdip, phy);
13910 if (*lundip != NULL) {
13911 return (DDI_SUCCESS);
13912 }
13913
13914 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13915
13916 return (rval);
13917 }
13918
13919 static int
13920 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
13921 uint8_t *lun_addr_type)
13922 {
13923 uint32_t lun_idx = 0;
13924
13925 ASSERT(lun_num != NULL);
13926 ASSERT(lun_addr_type != NULL);
13927
13928 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13929 /* determine report luns addressing type */
13930 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
13931 /*
13932 * Vendors in the field have been found to be concatenating
13933 * bus/target/lun to equal the complete lun value instead
13934 * of switching to flat space addressing
13935 */
13936 /* 00b - peripheral device addressing method */
13937 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
13938 /* FALLTHRU */
13939 /* 10b - logical unit addressing method */
13940 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
13941 /* FALLTHRU */
13942 /* 01b - flat space addressing method */
13943 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
13944 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
13945 *lun_addr_type = (buf[lun_idx] &
13946 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
13947 *lun_num = (buf[lun_idx] & 0x3F) << 8;
13948 *lun_num |= buf[lun_idx + 1];
13949 return (DDI_SUCCESS);
13950 default:
13951 return (DDI_FAILURE);
13952 }
13953 }
13954
13955 static int
13956 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
13957 {
13958 struct buf *repluns_bp = NULL;
13959 struct scsi_address ap;
13960 uchar_t cdb[CDB_GROUP5];
13961 int ret = DDI_FAILURE;
13962 int retry = 0;
13963 int lun_list_len = 0;
13964 uint16_t lun_num = 0;
13965 uint8_t lun_addr_type = 0;
13966 uint32_t lun_cnt = 0;
13967 uint32_t lun_total = 0;
13968 dev_info_t *cdip = NULL;
13969 uint16_t *saved_repluns = NULL;
13970 char *buffer = NULL;
13971 int buf_len = 128;
13972 mptsas_t *mpt = DIP2MPT(pdip);
13973 uint64_t sas_wwn = 0;
13974 uint8_t phy = 0xFF;
13975 uint32_t dev_info = 0;
13976
13977 mutex_enter(&mpt->m_mutex);
13978 sas_wwn = ptgt->m_addr.mta_wwn;
13979 phy = ptgt->m_phynum;
13980 dev_info = ptgt->m_deviceinfo;
13981 mutex_exit(&mpt->m_mutex);
13982
13983 if (sas_wwn == 0) {
13984 /*
13985 * It's a SATA without Device Name
13986 * So don't try multi-LUNs
13987 */
13988 if (mptsas_find_child_phy(pdip, phy)) {
13989 return (DDI_SUCCESS);
13990 } else {
13991 /*
13992 * need configure and create node
13993 */
13994 return (DDI_FAILURE);
13995 }
13996 }
13997
13998 /*
13999 * WWN (SAS address or Device Name exist)
14000 */
14001 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14002 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14003 /*
14004 * SATA device with Device Name
14005 * So don't try multi-LUNs
14006 */
14007 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
14008 return (DDI_SUCCESS);
14009 } else {
14010 return (DDI_FAILURE);
14011 }
14012 }
14013
14014 do {
14015 ap.a_target = MPTSAS_INVALID_DEVHDL;
14016 ap.a_lun = 0;
14017 ap.a_hba_tran = mpt->m_tran;
14018 repluns_bp = scsi_alloc_consistent_buf(&ap,
14019 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
14020 if (repluns_bp == NULL) {
14021 retry++;
14022 continue;
14023 }
14024 bzero(cdb, CDB_GROUP5);
14025 cdb[0] = SCMD_REPORT_LUNS;
14026 cdb[6] = (buf_len & 0xff000000) >> 24;
14027 cdb[7] = (buf_len & 0x00ff0000) >> 16;
14028 cdb[8] = (buf_len & 0x0000ff00) >> 8;
14029 cdb[9] = (buf_len & 0x000000ff);
14030
14031 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
14032 repluns_bp, NULL);
14033 if (ret != DDI_SUCCESS) {
14034 scsi_free_consistent_buf(repluns_bp);
14035 retry++;
14036 continue;
14037 }
14038 lun_list_len = BE_32(*(int *)((void *)(
14039 repluns_bp->b_un.b_addr)));
14040 if (buf_len >= lun_list_len + 8) {
14041 ret = DDI_SUCCESS;
14042 break;
14043 }
14044 scsi_free_consistent_buf(repluns_bp);
14045 buf_len = lun_list_len + 8;
14046
14047 } while (retry < 3);
14048
14049 if (ret != DDI_SUCCESS)
14050 return (ret);
14051 buffer = (char *)repluns_bp->b_un.b_addr;
14052 /*
14053 * find out the number of luns returned by the SCSI ReportLun call
14054 * and allocate buffer space
14055 */
14056 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14057 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
14058 if (saved_repluns == NULL) {
14059 scsi_free_consistent_buf(repluns_bp);
14060 return (DDI_FAILURE);
14061 }
14062 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
14063 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
14064 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
14065 continue;
14066 }
14067 saved_repluns[lun_cnt] = lun_num;
14068 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
14069 ret = DDI_SUCCESS;
14070 else
14071 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
14072 ptgt);
14073 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
14074 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
14075 MPTSAS_DEV_GONE);
14076 }
14077 }
14078 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
14079 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
14080 scsi_free_consistent_buf(repluns_bp);
14081 return (DDI_SUCCESS);
14082 }
14083
14084 static int
14085 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
14086 {
14087 int rval = DDI_FAILURE;
14088 struct scsi_inquiry *sd_inq = NULL;
14089 mptsas_t *mpt = DIP2MPT(pdip);
14090 mptsas_target_t *ptgt = NULL;
14091
14092 mutex_enter(&mpt->m_mutex);
14093 ptgt = refhash_linear_search(mpt->m_targets,
14094 mptsas_target_eval_devhdl, &target);
14095 mutex_exit(&mpt->m_mutex);
14096 if (ptgt == NULL) {
14097 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
14098 "not found.", target);
14099 return (rval);
14100 }
14101
14102 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14103 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
14104 SUN_INQSIZE, 0, (uchar_t)0);
14105
14106 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14107 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
14108 0);
14109 } else {
14110 rval = DDI_FAILURE;
14111 }
14112
14113 kmem_free(sd_inq, SUN_INQSIZE);
14114 return (rval);
14115 }
14116
14117 /*
14118 * configure all RAID volumes for virtual iport
14119 */
14120 static void
14121 mptsas_config_all_viport(dev_info_t *pdip)
14122 {
14123 mptsas_t *mpt = DIP2MPT(pdip);
14124 int config, vol;
14125 int target;
14126 dev_info_t *lundip = NULL;
14127
14128 /*
14129 * Get latest RAID info and search for any Volume DevHandles. If any
14130 * are found, configure the volume.
14131 */
14132 mutex_enter(&mpt->m_mutex);
14133 for (config = 0; config < mpt->m_num_raid_configs; config++) {
14134 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
14135 if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
14136 == 1) {
14137 target = mpt->m_raidconfig[config].
14138 m_raidvol[vol].m_raidhandle;
14139 mutex_exit(&mpt->m_mutex);
14140 (void) mptsas_config_raid(pdip, target,
14141 &lundip);
14142 mutex_enter(&mpt->m_mutex);
14143 }
14144 }
14145 }
14146 mutex_exit(&mpt->m_mutex);
14147 }
14148
/*
 * Offline every child dev_info node and mdi_pathinfo node under pdip
 * that belongs to target ptgt (matched by WWN) but whose LUN no longer
 * appears in repluns[] — the lun_cnt LUN numbers returned by the most
 * recent SCMD_REPORT_LUNS.  Such LUNs are offlined with
 * NDI_DEVI_REMOVE.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t *child = NULL, *savechild = NULL;
	mdi_pathinfo_t *pip = NULL, *savepip = NULL;
	uint64_t sas_wwn, wwid;
	uint8_t phy;
	int lun;
	int i;
	int find;
	char *addr;
	char *nodename;
	mptsas_t *mpt = DIP2MPT(pdip);

	/* Snapshot the target's WWN under the adapter lock. */
	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_addr.mta_wwn;
	mutex_exit(&mpt->m_mutex);

	/*
	 * Pass 1: regular (non-mpxio) child nodes.  The next sibling is
	 * fetched before any offline so that removing savechild does not
	 * break the walk.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		savechild = child;
		child = ddi_get_next_sibling(child);

		/* SMP (expander) nodes are not LUNs; skip them. */
		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		/* Unit address is "w<wwn>,<lun>" or "p<phy>,<lun>". */
		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		if (wwid == sas_wwn) {
			/* Is this LUN still in the REPORT LUNS data? */
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/*
	 * Pass 2: mdi_pathinfo (mpxio) nodes, same matching logic.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		/* Advance before a possible offline of savepip. */
		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
14245
14246 void
14247 mptsas_update_hashtab(struct mptsas *mpt)
14248 {
14249 uint32_t page_address;
14250 int rval = 0;
14251 uint16_t dev_handle;
14252 mptsas_target_t *ptgt = NULL;
14253 mptsas_smp_t smp_node;
14254
14255 /*
14256 * Get latest RAID info.
14257 */
14258 (void) mptsas_get_raid_info(mpt);
14259
14260 dev_handle = mpt->m_smp_devhdl;
14261 for (; mpt->m_done_traverse_smp == 0; ) {
14262 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
14263 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
14264 if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
14265 != DDI_SUCCESS) {
14266 break;
14267 }
14268 mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
14269 (void) mptsas_smp_alloc(mpt, &smp_node);
14270 }
14271
14272 /*
14273 * Config target devices
14274 */
14275 dev_handle = mpt->m_dev_handle;
14276
14277 /*
14278 * Do loop to get sas device page 0 by GetNextHandle till the
14279 * the last handle. If the sas device is a SATA/SSP target,
14280 * we try to config it.
14281 */
14282 for (; mpt->m_done_traverse_dev == 0; ) {
14283 ptgt = NULL;
14284 page_address =
14285 (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
14286 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14287 (uint32_t)dev_handle;
14288 rval = mptsas_get_target_device_info(mpt, page_address,
14289 &dev_handle, &ptgt);
14290 if ((rval == DEV_INFO_FAIL_PAGE0) ||
14291 (rval == DEV_INFO_FAIL_ALLOC)) {
14292 break;
14293 }
14294
14295 mpt->m_dev_handle = dev_handle;
14296 }
14297
14298 }
14299
/*
 * After a hard reset, bring the driver's data structures back in sync
 * with the (re-enumerated) hardware.  Caller must hold m_mutex.
 */
void
mptsas_update_driver_data(struct mptsas *mpt)
{
	mptsas_target_t *tp;
	mptsas_smp_t *sp;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	/*
	 * TODO after hard reset, update the driver data structures
	 * 1. update port/phymask mapping table mpt->m_phy_info
	 * 2. invalid all the entries in hash table
	 *    m_devhdl = 0xffff and m_deviceinfo = 0
	 * 3. call sas_device_page/expander_page to update hash table
	 */
	mptsas_update_phymask(mpt);

	/*
	 * Remove all the devhdls for existing entries but leave their
	 * addresses alone.  In update_hashtab() below, we'll find all
	 * targets that are still present and reassociate them with
	 * their potentially new devhdls.  Leaving the targets around in
	 * this fashion allows them to be used on the tx waitq even
	 * while the IOC reset is occurring.
	 */
	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		tp->m_deviceinfo = 0;
		tp->m_dr_flag = MPTSAS_DR_INACTIVE;
	}
	/* Same invalidation for SMP (expander) nodes. */
	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
	    sp = refhash_next(mpt->m_smp_targets, sp)) {
		sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		sp->m_deviceinfo = 0;
	}
	/* Force full re-traversal of both device and expander lists. */
	mpt->m_done_traverse_dev = 0;
	mpt->m_done_traverse_smp = 0;
	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
	mptsas_update_hashtab(mpt);
}
14341
/*
 * Configure every SMP node and target visible through the iport pdip.
 * A phymask of 0 identifies the virtual iport, which carries only RAID
 * volumes.
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t *smpdip = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);
	int phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t *psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* Refresh the device/expander hashes if discovery is incomplete. */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/*
	 * Online each SMP node whose phymask matches this iport.
	 * NOTE(review): m_mutex is dropped around mptsas_online_smp()
	 * while iterating the refhash; this presumes the current entry
	 * survives the drop — confirm against refhash semantics.
	 */
	for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
	    psmp = refhash_next(mpt->m_smp_targets, psmp)) {
		phy_mask = psmp->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
	}

	/* Configure each target whose phymask matches this iport. */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		phy_mask = ptgt->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}
	}
	mutex_exit(&mpt->m_mutex);
}
14394
14395 static int
14396 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
14397 {
14398 int rval = DDI_FAILURE;
14399 dev_info_t *tdip;
14400
14401 rval = mptsas_config_luns(pdip, ptgt);
14402 if (rval != DDI_SUCCESS) {
14403 /*
14404 * The return value means the SCMD_REPORT_LUNS
14405 * did not execute successfully. The target maybe
14406 * doesn't support such command.
14407 */
14408 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
14409 }
14410 return (rval);
14411 }
14412
14413 /*
14414 * Return fail if not all the childs/paths are freed.
14415 * if there is any path under the HBA, the return value will be always fail
14416 * because we didn't call mdi_pi_free for path
14417 */
14418 static int
14419 mptsas_offline_target(dev_info_t *pdip, char *name)
14420 {
14421 dev_info_t *child = NULL, *prechild = NULL;
14422 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
14423 int tmp_rval, rval = DDI_SUCCESS;
14424 char *addr, *cp;
14425 size_t s;
14426 mptsas_t *mpt = DIP2MPT(pdip);
14427
14428 child = ddi_get_child(pdip);
14429 while (child) {
14430 addr = ddi_get_name_addr(child);
14431 prechild = child;
14432 child = ddi_get_next_sibling(child);
14433
14434 if (addr == NULL) {
14435 continue;
14436 }
14437 if ((cp = strchr(addr, ',')) == NULL) {
14438 continue;
14439 }
14440
14441 s = (uintptr_t)cp - (uintptr_t)addr;
14442
14443 if (strncmp(addr, name, s) != 0) {
14444 continue;
14445 }
14446
14447 tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
14448 NDI_DEVI_REMOVE);
14449 if (tmp_rval != DDI_SUCCESS) {
14450 rval = DDI_FAILURE;
14451 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14452 prechild, MPTSAS_DEV_GONE) !=
14453 DDI_PROP_SUCCESS) {
14454 mptsas_log(mpt, CE_WARN, "mptsas driver "
14455 "unable to create property for "
14456 "SAS %s (MPTSAS_DEV_GONE)", addr);
14457 }
14458 }
14459 }
14460
14461 pip = mdi_get_next_client_path(pdip, NULL);
14462 while (pip) {
14463 addr = MDI_PI(pip)->pi_addr;
14464 savepip = pip;
14465 pip = mdi_get_next_client_path(pdip, pip);
14466 if (addr == NULL) {
14467 continue;
14468 }
14469
14470 if ((cp = strchr(addr, ',')) == NULL) {
14471 continue;
14472 }
14473
14474 s = (uintptr_t)cp - (uintptr_t)addr;
14475
14476 if (strncmp(addr, name, s) != 0) {
14477 continue;
14478 }
14479
14480 (void) mptsas_offline_lun(pdip, NULL, savepip,
14481 NDI_DEVI_REMOVE);
14482 /*
14483 * driver will not invoke mdi_pi_free, so path will not
14484 * be freed forever, return DDI_FAILURE.
14485 */
14486 rval = DDI_FAILURE;
14487 }
14488 return (rval);
14489 }
14490
14491 static int
14492 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
14493 mdi_pathinfo_t *rpip, uint_t flags)
14494 {
14495 int rval = DDI_FAILURE;
14496 char *devname;
14497 dev_info_t *cdip, *parent;
14498
14499 if (rpip != NULL) {
14500 parent = scsi_vhci_dip;
14501 cdip = mdi_pi_get_client(rpip);
14502 } else if (rdip != NULL) {
14503 parent = pdip;
14504 cdip = rdip;
14505 } else {
14506 return (DDI_FAILURE);
14507 }
14508
14509 /*
14510 * Make sure node is attached otherwise
14511 * it won't have related cache nodes to
14512 * clean up. i_ddi_devi_attached is
14513 * similiar to i_ddi_node_state(cdip) >=
14514 * DS_ATTACHED.
14515 */
14516 if (i_ddi_devi_attached(cdip)) {
14517
14518 /* Get full devname */
14519 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14520 (void) ddi_deviname(cdip, devname);
14521 /* Clean cache */
14522 (void) devfs_clean(parent, devname + 1,
14523 DV_CLEAN_FORCE);
14524 kmem_free(devname, MAXNAMELEN + 1);
14525 }
14526 if (rpip != NULL) {
14527 if (MDI_PI_IS_OFFLINE(rpip)) {
14528 rval = DDI_SUCCESS;
14529 } else {
14530 rval = mdi_pi_offline(rpip, 0);
14531 }
14532 } else {
14533 rval = ndi_devi_offline(cdip, flags);
14534 }
14535
14536 return (rval);
14537 }
14538
14539 static dev_info_t *
14540 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
14541 {
14542 dev_info_t *child = NULL;
14543 char *smp_wwn = NULL;
14544
14545 child = ddi_get_child(parent);
14546 while (child) {
14547 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
14548 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
14549 != DDI_SUCCESS) {
14550 child = ddi_get_next_sibling(child);
14551 continue;
14552 }
14553
14554 if (strcmp(smp_wwn, str_wwn) == 0) {
14555 ddi_prop_free(smp_wwn);
14556 break;
14557 }
14558 child = ddi_get_next_sibling(child);
14559 ddi_prop_free(smp_wwn);
14560 }
14561 return (child);
14562 }
14563
14564 static int
14565 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
14566 {
14567 int rval = DDI_FAILURE;
14568 char *devname;
14569 char wwn_str[MPTSAS_WWN_STRLEN];
14570 dev_info_t *cdip;
14571
14572 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
14573
14574 cdip = mptsas_find_smp_child(pdip, wwn_str);
14575
14576 if (cdip == NULL)
14577 return (DDI_SUCCESS);
14578
14579 /*
14580 * Make sure node is attached otherwise
14581 * it won't have related cache nodes to
14582 * clean up. i_ddi_devi_attached is
14583 * similiar to i_ddi_node_state(cdip) >=
14584 * DS_ATTACHED.
14585 */
14586 if (i_ddi_devi_attached(cdip)) {
14587
14588 /* Get full devname */
14589 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14590 (void) ddi_deviname(cdip, devname);
14591 /* Clean cache */
14592 (void) devfs_clean(pdip, devname + 1,
14593 DV_CLEAN_FORCE);
14594 kmem_free(devname, MAXNAMELEN + 1);
14595 }
14596
14597 rval = ndi_devi_offline(cdip, flags);
14598
14599 return (rval);
14600 }
14601
14602 static dev_info_t *
14603 mptsas_find_child(dev_info_t *pdip, char *name)
14604 {
14605 dev_info_t *child = NULL;
14606 char *rname = NULL;
14607 int rval = DDI_FAILURE;
14608
14609 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14610
14611 child = ddi_get_child(pdip);
14612 while (child) {
14613 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
14614 if (rval != DDI_SUCCESS) {
14615 child = ddi_get_next_sibling(child);
14616 bzero(rname, SCSI_MAXNAMELEN);
14617 continue;
14618 }
14619
14620 if (strcmp(rname, name) == 0) {
14621 break;
14622 }
14623 child = ddi_get_next_sibling(child);
14624 bzero(rname, SCSI_MAXNAMELEN);
14625 }
14626
14627 kmem_free(rname, SCSI_MAXNAMELEN);
14628
14629 return (child);
14630 }
14631
14632
14633 static dev_info_t *
14634 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
14635 {
14636 dev_info_t *child = NULL;
14637 char *name = NULL;
14638 char *addr = NULL;
14639
14640 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14641 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14642 (void) sprintf(name, "%016"PRIx64, sasaddr);
14643 (void) sprintf(addr, "w%s,%x", name, lun);
14644 child = mptsas_find_child(pdip, addr);
14645 kmem_free(name, SCSI_MAXNAMELEN);
14646 kmem_free(addr, SCSI_MAXNAMELEN);
14647 return (child);
14648 }
14649
14650 static dev_info_t *
14651 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
14652 {
14653 dev_info_t *child;
14654 char *addr;
14655
14656 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14657 (void) sprintf(addr, "p%x,0", phy);
14658 child = mptsas_find_child(pdip, addr);
14659 kmem_free(addr, SCSI_MAXNAMELEN);
14660 return (child);
14661 }
14662
14663 static mdi_pathinfo_t *
14664 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
14665 {
14666 mdi_pathinfo_t *path;
14667 char *addr = NULL;
14668
14669 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14670 (void) sprintf(addr, "p%x,0", phy);
14671 path = mdi_pi_find(pdip, NULL, addr);
14672 kmem_free(addr, SCSI_MAXNAMELEN);
14673 return (path);
14674 }
14675
14676 static mdi_pathinfo_t *
14677 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
14678 {
14679 mdi_pathinfo_t *path;
14680 char *name = NULL;
14681 char *addr = NULL;
14682
14683 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14684 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14685 (void) sprintf(name, "%016"PRIx64, sasaddr);
14686 (void) sprintf(addr, "w%s,%x", name, lun);
14687 path = mdi_pi_find(parent, NULL, addr);
14688 kmem_free(name, SCSI_MAXNAMELEN);
14689 kmem_free(addr, SCSI_MAXNAMELEN);
14690
14691 return (path);
14692 }
14693
/*
 * Create a child node for one LUN.  When multipathing is enabled and a
 * GUID can be derived from the device's VPD page 0x83 data, a virtual
 * (scsi_vhci) LUN is created; otherwise a physical child node is
 * created under pdip.  Returns DDI_SUCCESS when a node was created.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int i = 0;
	uchar_t *inq83 = NULL;
	int inq83_len1 = 0xFF;
	int inq83_len = 0;
	int rval = DDI_FAILURE;
	ddi_devid_t devid;
	char *guid = NULL;
	int target = ptgt->m_devhdl;
	mdi_pathinfo_t	*pip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83	= kmem_zalloc(inq83_len1, KM_SLEEP);

	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable: optionally fall back to a physical
			 * node when page 0x83 can't be read at all.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			/* Wait one second between retries. */
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	/* i reaches the timeout only when every retry failed to encode. */
	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	rval = DDI_FAILURE;

create_lun:
	/* Prefer a virtual (mpxio) LUN when we have a GUID. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	/* Fall back to a physical child node. */
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
14817
14818 static int
14819 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
14820 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
14821 {
14822 int target;
14823 char *nodename = NULL;
14824 char **compatible = NULL;
14825 int ncompatible = 0;
14826 int mdi_rtn = MDI_FAILURE;
14827 int rval = DDI_FAILURE;
14828 char *old_guid = NULL;
14829 mptsas_t *mpt = DIP2MPT(pdip);
14830 char *lun_addr = NULL;
14831 char *wwn_str = NULL;
14832 char *attached_wwn_str = NULL;
14833 char *component = NULL;
14834 uint8_t phy = 0xFF;
14835 uint64_t sas_wwn;
14836 int64_t lun64 = 0;
14837 uint32_t devinfo;
14838 uint16_t dev_hdl;
14839 uint16_t pdev_hdl;
14840 uint64_t dev_sas_wwn;
14841 uint64_t pdev_sas_wwn;
14842 uint32_t pdev_info;
14843 uint8_t physport;
14844 uint8_t phy_id;
14845 uint32_t page_address;
14846 uint16_t bay_num, enclosure, io_flags;
14847 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14848 uint32_t dev_info;
14849
14850 mutex_enter(&mpt->m_mutex);
14851 target = ptgt->m_devhdl;
14852 sas_wwn = ptgt->m_addr.mta_wwn;
14853 devinfo = ptgt->m_deviceinfo;
14854 phy = ptgt->m_phynum;
14855 mutex_exit(&mpt->m_mutex);
14856
14857 if (sas_wwn) {
14858 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
14859 } else {
14860 *pip = mptsas_find_path_phy(pdip, phy);
14861 }
14862
14863 if (*pip != NULL) {
14864 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14865 ASSERT(*lun_dip != NULL);
14866 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
14867 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
14868 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
14869 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
14870 /*
14871 * Same path back online again.
14872 */
14873 (void) ddi_prop_free(old_guid);
14874 if ((!MDI_PI_IS_ONLINE(*pip)) &&
14875 (!MDI_PI_IS_STANDBY(*pip)) &&
14876 (ptgt->m_tgt_unconfigured == 0)) {
14877 rval = mdi_pi_online(*pip, 0);
14878 mutex_enter(&mpt->m_mutex);
14879 ptgt->m_led_status = 0;
14880 (void) mptsas_flush_led_status(mpt,
14881 ptgt);
14882 mutex_exit(&mpt->m_mutex);
14883 } else {
14884 rval = DDI_SUCCESS;
14885 }
14886 if (rval != DDI_SUCCESS) {
14887 mptsas_log(mpt, CE_WARN, "path:target: "
14888 "%x, lun:%x online failed!", target,
14889 lun);
14890 *pip = NULL;
14891 *lun_dip = NULL;
14892 }
14893 return (rval);
14894 } else {
14895 /*
14896 * The GUID of the LUN has changed which maybe
14897 * because customer mapped another volume to the
14898 * same LUN.
14899 */
14900 mptsas_log(mpt, CE_WARN, "The GUID of the "
14901 "target:%x, lun:%x was changed, maybe "
14902 "because someone mapped another volume "
14903 "to the same LUN", target, lun);
14904 (void) ddi_prop_free(old_guid);
14905 if (!MDI_PI_IS_OFFLINE(*pip)) {
14906 rval = mdi_pi_offline(*pip, 0);
14907 if (rval != MDI_SUCCESS) {
14908 mptsas_log(mpt, CE_WARN, "path:"
14909 "target:%x, lun:%x offline "
14910 "failed!", target, lun);
14911 *pip = NULL;
14912 *lun_dip = NULL;
14913 return (DDI_FAILURE);
14914 }
14915 }
14916 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
14917 mptsas_log(mpt, CE_WARN, "path:target:"
14918 "%x, lun:%x free failed!", target,
14919 lun);
14920 *pip = NULL;
14921 *lun_dip = NULL;
14922 return (DDI_FAILURE);
14923 }
14924 }
14925 } else {
14926 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
14927 "property for path:target:%x, lun:%x", target, lun);
14928 *pip = NULL;
14929 *lun_dip = NULL;
14930 return (DDI_FAILURE);
14931 }
14932 }
14933 scsi_hba_nodename_compatible_get(inq, NULL,
14934 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
14935
14936 /*
14937 * if nodename can't be determined then print a message and skip it
14938 */
14939 if (nodename == NULL) {
14940 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
14941 "driver for target%d lun %d dtype:0x%02x", target, lun,
14942 inq->inq_dtype);
14943 return (DDI_FAILURE);
14944 }
14945
14946 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14947 /* The property is needed by MPAPI */
14948 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14949
14950 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14951 if (guid) {
14952 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
14953 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14954 } else {
14955 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
14956 (void) sprintf(wwn_str, "p%x", phy);
14957 }
14958
14959 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
14960 guid, lun_addr, compatible, ncompatible,
14961 0, pip);
14962 if (mdi_rtn == MDI_SUCCESS) {
14963
14964 if (mdi_prop_update_string(*pip, MDI_GUID,
14965 guid) != DDI_SUCCESS) {
14966 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14967 "create prop for target %d lun %d (MDI_GUID)",
14968 target, lun);
14969 mdi_rtn = MDI_FAILURE;
14970 goto virt_create_done;
14971 }
14972
14973 if (mdi_prop_update_int(*pip, LUN_PROP,
14974 lun) != DDI_SUCCESS) {
14975 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14976 "create prop for target %d lun %d (LUN_PROP)",
14977 target, lun);
14978 mdi_rtn = MDI_FAILURE;
14979 goto virt_create_done;
14980 }
14981 lun64 = (int64_t)lun;
14982 if (mdi_prop_update_int64(*pip, LUN64_PROP,
14983 lun64) != DDI_SUCCESS) {
14984 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14985 "create prop for target %d (LUN64_PROP)",
14986 target);
14987 mdi_rtn = MDI_FAILURE;
14988 goto virt_create_done;
14989 }
14990 if (mdi_prop_update_string_array(*pip, "compatible",
14991 compatible, ncompatible) !=
14992 DDI_PROP_SUCCESS) {
14993 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14994 "create prop for target %d lun %d (COMPATIBLE)",
14995 target, lun);
14996 mdi_rtn = MDI_FAILURE;
14997 goto virt_create_done;
14998 }
14999 if (sas_wwn && (mdi_prop_update_string(*pip,
15000 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
15001 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15002 "create prop for target %d lun %d "
15003 "(target-port)", target, lun);
15004 mdi_rtn = MDI_FAILURE;
15005 goto virt_create_done;
15006 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
15007 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
15008 /*
15009 * Direct attached SATA device without DeviceName
15010 */
15011 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15012 "create prop for SAS target %d lun %d "
15013 "(sata-phy)", target, lun);
15014 mdi_rtn = MDI_FAILURE;
15015 goto virt_create_done;
15016 }
15017 mutex_enter(&mpt->m_mutex);
15018
15019 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15020 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15021 (uint32_t)ptgt->m_devhdl;
15022 rval = mptsas_get_sas_device_page0(mpt, page_address,
15023 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
15024 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15025 if (rval != DDI_SUCCESS) {
15026 mutex_exit(&mpt->m_mutex);
15027 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15028 "parent device for handle %d", page_address);
15029 mdi_rtn = MDI_FAILURE;
15030 goto virt_create_done;
15031 }
15032
15033 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15034 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15035 rval = mptsas_get_sas_device_page0(mpt, page_address,
15036 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15037 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15038 if (rval != DDI_SUCCESS) {
15039 mutex_exit(&mpt->m_mutex);
15040 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15041 "device info for handle %d", page_address);
15042 mdi_rtn = MDI_FAILURE;
15043 goto virt_create_done;
15044 }
15045
15046 mutex_exit(&mpt->m_mutex);
15047
15048 /*
15049 * If this device direct attached to the controller
15050 * set the attached-port to the base wwid
15051 */
15052 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15053 != DEVINFO_DIRECT_ATTACHED) {
15054 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15055 pdev_sas_wwn);
15056 } else {
15057 /*
15058 * Update the iport's attached-port to guid
15059 */
15060 if (sas_wwn == 0) {
15061 (void) sprintf(wwn_str, "p%x", phy);
15062 } else {
15063 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15064 }
15065 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15066 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15067 DDI_PROP_SUCCESS) {
15068 mptsas_log(mpt, CE_WARN,
15069 "mptsas unable to create "
15070 "property for iport target-port"
15071 " %s (sas_wwn)",
15072 wwn_str);
15073 mdi_rtn = MDI_FAILURE;
15074 goto virt_create_done;
15075 }
15076
15077 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15078 mpt->un.m_base_wwid);
15079 }
15080
15081 if (mdi_prop_update_string(*pip,
15082 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15083 DDI_PROP_SUCCESS) {
15084 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15085 "property for iport attached-port %s (sas_wwn)",
15086 attached_wwn_str);
15087 mdi_rtn = MDI_FAILURE;
15088 goto virt_create_done;
15089 }
15090
15091
15092 if (inq->inq_dtype == 0) {
15093 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15094 /*
15095 * set obp path for pathinfo
15096 */
15097 (void) snprintf(component, MAXPATHLEN,
15098 "disk@%s", lun_addr);
15099
15100 if (mdi_pi_pathname_obp_set(*pip, component) !=
15101 DDI_SUCCESS) {
15102 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15103 "unable to set obp-path for object %s",
15104 component);
15105 mdi_rtn = MDI_FAILURE;
15106 goto virt_create_done;
15107 }
15108 }
15109
15110 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15111 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15112 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15113 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
15114 "pm-capable", 1)) !=
15115 DDI_PROP_SUCCESS) {
15116 mptsas_log(mpt, CE_WARN, "mptsas driver"
15117 "failed to create pm-capable "
15118 "property, target %d", target);
15119 mdi_rtn = MDI_FAILURE;
15120 goto virt_create_done;
15121 }
15122 }
15123 /*
15124 * Create the phy-num property
15125 */
15126 if (mdi_prop_update_int(*pip, "phy-num",
15127 ptgt->m_phynum) != DDI_SUCCESS) {
15128 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15129 "create phy-num property for target %d lun %d",
15130 target, lun);
15131 mdi_rtn = MDI_FAILURE;
15132 goto virt_create_done;
15133 }
15134 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
15135 mdi_rtn = mdi_pi_online(*pip, 0);
15136 if (mdi_rtn == MDI_SUCCESS) {
15137 mutex_enter(&mpt->m_mutex);
15138 ptgt->m_led_status = 0;
15139 (void) mptsas_flush_led_status(mpt, ptgt);
15140 mutex_exit(&mpt->m_mutex);
15141 }
15142 if (mdi_rtn == MDI_NOT_SUPPORTED) {
15143 mdi_rtn = MDI_FAILURE;
15144 }
15145 virt_create_done:
15146 if (*pip && mdi_rtn != MDI_SUCCESS) {
15147 (void) mdi_pi_free(*pip, 0);
15148 *pip = NULL;
15149 *lun_dip = NULL;
15150 }
15151 }
15152
15153 scsi_hba_nodename_compatible_free(nodename, compatible);
15154 if (lun_addr != NULL) {
15155 kmem_free(lun_addr, SCSI_MAXNAMELEN);
15156 }
15157 if (wwn_str != NULL) {
15158 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15159 }
15160 if (component != NULL) {
15161 kmem_free(component, MAXPATHLEN);
15162 }
15163
15164 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15165 }
15166
15167 static int
15168 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
15169 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
15170 {
15171 int target;
15172 int rval;
15173 int ndi_rtn = NDI_FAILURE;
15174 uint64_t be_sas_wwn;
15175 char *nodename = NULL;
15176 char **compatible = NULL;
15177 int ncompatible = 0;
15178 int instance = 0;
15179 mptsas_t *mpt = DIP2MPT(pdip);
15180 char *wwn_str = NULL;
15181 char *component = NULL;
15182 char *attached_wwn_str = NULL;
15183 uint8_t phy = 0xFF;
15184 uint64_t sas_wwn;
15185 uint32_t devinfo;
15186 uint16_t dev_hdl;
15187 uint16_t pdev_hdl;
15188 uint64_t pdev_sas_wwn;
15189 uint64_t dev_sas_wwn;
15190 uint32_t pdev_info;
15191 uint8_t physport;
15192 uint8_t phy_id;
15193 uint32_t page_address;
15194 uint16_t bay_num, enclosure, io_flags;
15195 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
15196 uint32_t dev_info;
15197 int64_t lun64 = 0;
15198
15199 mutex_enter(&mpt->m_mutex);
15200 target = ptgt->m_devhdl;
15201 sas_wwn = ptgt->m_addr.mta_wwn;
15202 devinfo = ptgt->m_deviceinfo;
15203 phy = ptgt->m_phynum;
15204 mutex_exit(&mpt->m_mutex);
15205
15206 /*
15207 * generate compatible property with binding-set "mpt"
15208 */
15209 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
15210 &nodename, &compatible, &ncompatible);
15211
15212 /*
15213 * if nodename can't be determined then print a message and skip it
15214 */
15215 if (nodename == NULL) {
15216 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
15217 "for target %d lun %d", target, lun);
15218 return (DDI_FAILURE);
15219 }
15220
15221 ndi_rtn = ndi_devi_alloc(pdip, nodename,
15222 DEVI_SID_NODEID, lun_dip);
15223
15224 /*
15225 * if lun alloc success, set props
15226 */
15227 if (ndi_rtn == NDI_SUCCESS) {
15228
15229 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15230 *lun_dip, LUN_PROP, lun) !=
15231 DDI_PROP_SUCCESS) {
15232 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15233 "property for target %d lun %d (LUN_PROP)",
15234 target, lun);
15235 ndi_rtn = NDI_FAILURE;
15236 goto phys_create_done;
15237 }
15238
15239 lun64 = (int64_t)lun;
15240 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
15241 *lun_dip, LUN64_PROP, lun64) !=
15242 DDI_PROP_SUCCESS) {
15243 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15244 "property for target %d lun64 %d (LUN64_PROP)",
15245 target, lun);
15246 ndi_rtn = NDI_FAILURE;
15247 goto phys_create_done;
15248 }
15249 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
15250 *lun_dip, "compatible", compatible, ncompatible)
15251 != DDI_PROP_SUCCESS) {
15252 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15253 "property for target %d lun %d (COMPATIBLE)",
15254 target, lun);
15255 ndi_rtn = NDI_FAILURE;
15256 goto phys_create_done;
15257 }
15258
15259 /*
15260 * We need the SAS WWN for non-multipath devices, so
15261 * we'll use the same property as that multipathing
15262 * devices need to present for MPAPI. If we don't have
15263 * a WWN (e.g. parallel SCSI), don't create the prop.
15264 */
15265 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15266 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15267 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
15268 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
15269 != DDI_PROP_SUCCESS) {
15270 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15271 "create property for SAS target %d lun %d "
15272 "(target-port)", target, lun);
15273 ndi_rtn = NDI_FAILURE;
15274 goto phys_create_done;
15275 }
15276
15277 be_sas_wwn = BE_64(sas_wwn);
15278 if (sas_wwn && ndi_prop_update_byte_array(
15279 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
15280 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
15281 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15282 "create property for SAS target %d lun %d "
15283 "(port-wwn)", target, lun);
15284 ndi_rtn = NDI_FAILURE;
15285 goto phys_create_done;
15286 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
15287 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
15288 DDI_PROP_SUCCESS)) {
15289 /*
15290 * Direct attached SATA device without DeviceName
15291 */
15292 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15293 "create property for SAS target %d lun %d "
15294 "(sata-phy)", target, lun);
15295 ndi_rtn = NDI_FAILURE;
15296 goto phys_create_done;
15297 }
15298
15299 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15300 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
15301 mptsas_log(mpt, CE_WARN, "mptsas unable to"
15302 "create property for SAS target %d lun %d"
15303 " (SAS_PROP)", target, lun);
15304 ndi_rtn = NDI_FAILURE;
15305 goto phys_create_done;
15306 }
15307 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
15308 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
15309 mptsas_log(mpt, CE_WARN, "mptsas unable "
15310 "to create guid property for target %d "
15311 "lun %d", target, lun);
15312 ndi_rtn = NDI_FAILURE;
15313 goto phys_create_done;
15314 }
15315
15316 /*
15317 * The following code is to set properties for SM-HBA support,
15318 * it doesn't apply to RAID volumes
15319 */
15320 if (ptgt->m_addr.mta_phymask == 0)
15321 goto phys_raid_lun;
15322
15323 mutex_enter(&mpt->m_mutex);
15324
15325 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15326 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15327 (uint32_t)ptgt->m_devhdl;
15328 rval = mptsas_get_sas_device_page0(mpt, page_address,
15329 &dev_hdl, &dev_sas_wwn, &dev_info,
15330 &physport, &phy_id, &pdev_hdl,
15331 &bay_num, &enclosure, &io_flags);
15332 if (rval != DDI_SUCCESS) {
15333 mutex_exit(&mpt->m_mutex);
15334 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15335 "parent device for handle %d.", page_address);
15336 ndi_rtn = NDI_FAILURE;
15337 goto phys_create_done;
15338 }
15339
15340 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15341 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15342 rval = mptsas_get_sas_device_page0(mpt, page_address,
15343 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15344 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15345 if (rval != DDI_SUCCESS) {
15346 mutex_exit(&mpt->m_mutex);
15347 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15348 "device for handle %d.", page_address);
15349 ndi_rtn = NDI_FAILURE;
15350 goto phys_create_done;
15351 }
15352
15353 mutex_exit(&mpt->m_mutex);
15354
15355 /*
15356 * If this device direct attached to the controller
15357 * set the attached-port to the base wwid
15358 */
15359 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15360 != DEVINFO_DIRECT_ATTACHED) {
15361 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15362 pdev_sas_wwn);
15363 } else {
15364 /*
15365 * Update the iport's attached-port to guid
15366 */
15367 if (sas_wwn == 0) {
15368 (void) sprintf(wwn_str, "p%x", phy);
15369 } else {
15370 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15371 }
15372 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15373 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15374 DDI_PROP_SUCCESS) {
15375 mptsas_log(mpt, CE_WARN,
15376 "mptsas unable to create "
15377 "property for iport target-port"
15378 " %s (sas_wwn)",
15379 wwn_str);
15380 ndi_rtn = NDI_FAILURE;
15381 goto phys_create_done;
15382 }
15383
15384 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15385 mpt->un.m_base_wwid);
15386 }
15387
15388 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15389 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15390 DDI_PROP_SUCCESS) {
15391 mptsas_log(mpt, CE_WARN,
15392 "mptsas unable to create "
15393 "property for iport attached-port %s (sas_wwn)",
15394 attached_wwn_str);
15395 ndi_rtn = NDI_FAILURE;
15396 goto phys_create_done;
15397 }
15398
15399 if (IS_SATA_DEVICE(dev_info)) {
15400 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15401 *lun_dip, MPTSAS_VARIANT, "sata") !=
15402 DDI_PROP_SUCCESS) {
15403 mptsas_log(mpt, CE_WARN,
15404 "mptsas unable to create "
15405 "property for device variant ");
15406 ndi_rtn = NDI_FAILURE;
15407 goto phys_create_done;
15408 }
15409 }
15410
15411 if (IS_ATAPI_DEVICE(dev_info)) {
15412 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15413 *lun_dip, MPTSAS_VARIANT, "atapi") !=
15414 DDI_PROP_SUCCESS) {
15415 mptsas_log(mpt, CE_WARN,
15416 "mptsas unable to create "
15417 "property for device variant ");
15418 ndi_rtn = NDI_FAILURE;
15419 goto phys_create_done;
15420 }
15421 }
15422
15423 phys_raid_lun:
15424 /*
15425 * if this is a SAS controller, and the target is a SATA
15426 * drive, set the 'pm-capable' property for sd and if on
15427 * an OPL platform, also check if this is an ATAPI
15428 * device.
15429 */
15430 instance = ddi_get_instance(mpt->m_dip);
15431 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15432 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15433 NDBG2(("mptsas%d: creating pm-capable property, "
15434 "target %d", instance, target));
15435
15436 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
15437 *lun_dip, "pm-capable", 1)) !=
15438 DDI_PROP_SUCCESS) {
15439 mptsas_log(mpt, CE_WARN, "mptsas "
15440 "failed to create pm-capable "
15441 "property, target %d", target);
15442 ndi_rtn = NDI_FAILURE;
15443 goto phys_create_done;
15444 }
15445
15446 }
15447
15448 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
15449 /*
15450 * add 'obp-path' properties for devinfo
15451 */
15452 bzero(wwn_str, sizeof (wwn_str));
15453 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15454 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15455 if (guid) {
15456 (void) snprintf(component, MAXPATHLEN,
15457 "disk@w%s,%x", wwn_str, lun);
15458 } else {
15459 (void) snprintf(component, MAXPATHLEN,
15460 "disk@p%x,%x", phy, lun);
15461 }
15462 if (ddi_pathname_obp_set(*lun_dip, component)
15463 != DDI_SUCCESS) {
15464 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15465 "unable to set obp-path for SAS "
15466 "object %s", component);
15467 ndi_rtn = NDI_FAILURE;
15468 goto phys_create_done;
15469 }
15470 }
15471 /*
15472 * Create the phy-num property for non-raid disk
15473 */
15474 if (ptgt->m_addr.mta_phymask != 0) {
15475 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15476 *lun_dip, "phy-num", ptgt->m_phynum) !=
15477 DDI_PROP_SUCCESS) {
15478 mptsas_log(mpt, CE_WARN, "mptsas driver "
15479 "failed to create phy-num property for "
15480 "target %d", target);
15481 ndi_rtn = NDI_FAILURE;
15482 goto phys_create_done;
15483 }
15484 }
15485 phys_create_done:
15486 /*
15487 * If props were setup ok, online the lun
15488 */
15489 if (ndi_rtn == NDI_SUCCESS) {
15490 /*
15491 * Try to online the new node
15492 */
15493 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
15494 }
15495 if (ndi_rtn == NDI_SUCCESS) {
15496 mutex_enter(&mpt->m_mutex);
15497 ptgt->m_led_status = 0;
15498 (void) mptsas_flush_led_status(mpt, ptgt);
15499 mutex_exit(&mpt->m_mutex);
15500 }
15501
15502 /*
15503 * If success set rtn flag, else unwire alloc'd lun
15504 */
15505 if (ndi_rtn != NDI_SUCCESS) {
15506 NDBG12(("mptsas driver unable to online "
15507 "target %d lun %d", target, lun));
15508 ndi_prop_remove_all(*lun_dip);
15509 (void) ndi_devi_free(*lun_dip);
15510 *lun_dip = NULL;
15511 }
15512 }
15513
15514 scsi_hba_nodename_compatible_free(nodename, compatible);
15515
15516 if (wwn_str != NULL) {
15517 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15518 }
15519 if (component != NULL) {
15520 kmem_free(component, MAXPATHLEN);
15521 }
15522
15523
15524 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15525 }
15526
15527 static int
15528 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
15529 {
15530 mptsas_t *mpt = DIP2MPT(pdip);
15531 struct smp_device smp_sd;
15532
15533 /* XXX An HBA driver should not be allocating an smp_device. */
15534 bzero(&smp_sd, sizeof (struct smp_device));
15535 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
15536 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
15537
15538 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
15539 return (NDI_FAILURE);
15540 return (NDI_SUCCESS);
15541 }
15542
15543 static int
15544 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
15545 {
15546 mptsas_t *mpt = DIP2MPT(pdip);
15547 mptsas_smp_t *psmp = NULL;
15548 int rval;
15549 int phymask;
15550
15551 /*
15552 * Get the physical port associated to the iport
15553 * PHYMASK TODO
15554 */
15555 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
15556 "phymask", 0);
15557 /*
15558 * Find the smp node in hash table with specified sas address and
15559 * physical port
15560 */
15561 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
15562 if (psmp == NULL) {
15563 return (DDI_FAILURE);
15564 }
15565
15566 rval = mptsas_online_smp(pdip, psmp, smp_dip);
15567
15568 return (rval);
15569 }
15570
/*
 * Allocate an ndi "smp" child node under the iport for the given expander
 * (smp_node), create the properties the SMP framework and SM-HBA support
 * require (smp-wwn, target-port, attached-port, SMP_PROP), and online it.
 * If the expander is direct-attached and the iport has not yet been set
 * up for SM-HBA, the iport's phy/num-phys/attached-port properties are
 * (re)built here as well.
 *
 * Returns DDI_SUCCESS if the node is (or already was) online; on failure
 * the allocated node is freed and DDI_FAILURE is returned.
 */
static int
mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
    dev_info_t **smp_dip)
{
	char		wwn_str[MPTSAS_WWN_STRLEN];
	char		attached_wwn_str[MPTSAS_WWN_STRLEN];
	int		ndi_rtn = NDI_FAILURE;
	int		rval = 0;
	mptsas_smp_t	dev_info;
	uint32_t	page_address;
	mptsas_t	*mpt = DIP2MPT(pdip);
	uint16_t	dev_hdl;
	uint64_t	sas_wwn;
	uint64_t	smp_sas_wwn;
	uint8_t		physport;
	uint8_t		phy_id;
	uint16_t	pdev_hdl;
	uint8_t		numphys = 0;
	uint16_t	i = 0;
	char		phymask[MPTSAS_MAX_PHYS];
	char		*iport = NULL;
	mptsas_phymask_t	phy_mask = 0;
	uint16_t	attached_devhdl;
	uint16_t	bay_num, enclosure, io_flags;

	/* Unit-address form of the expander WWN (no "w" prefix here). */
	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);

	/*
	 * Probe smp device, prevent the node of removed device from being
	 * configured succesfully
	 */
	if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Already configured?  Then we are done. */
	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
		return (DDI_SUCCESS);
	}

	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {
		/*
		 * Set the flavor of the child to be SMP flavored
		 */
		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SMP_WWN, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp device %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		/* target-port uses the "w"-prefixed WWN form. */
		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for iport target-port %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);

		/* Read expander page 0 to learn this expander's parent. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &dev_info);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to get expander "
			    "parent device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Parent's SAS device page 0: yields parent WWN (sas_wwn). */
		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_pdevhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo, &physport,
		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* The expander's own SAS device page 0 (device info). */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure,
		    &io_flags);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		/*
		 * If this smp direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    sas_wwn);
		} else {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SMP %s (SMP_PROP) ",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/*
		 * check the smp to see whether it direct
		 * attached to the controller
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			goto smp_create_done;
		}
		/* If num-phys already exists, the iport is set up. */
		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
		if (numphys > 0) {
			goto smp_create_done;
		}
		/*
		 * this iport is an old iport, we need to
		 * reconfig the props for it.
		 */
		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_VIRTUAL_PORT, 0) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			goto smp_create_done;
		}

		/*
		 * Match the iport's unit address (a phymask in hex) to a
		 * phy_info entry, then count the phys in that mask.
		 */
		mutex_enter(&mpt->m_mutex);
		numphys = 0;
		iport = ddi_get_name_addr(pdip);
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			bzero(phymask, sizeof (phymask));
			(void) sprintf(phymask,
			    "%x", mpt->m_phy_info[i].phy_mask);
			if (strcmp(phymask, iport) == 0) {
				phy_mask = mpt->m_phy_info[i].phy_mask;
				break;
			}
		}

		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			if ((phy_mask >> i) & 0x01) {
				numphys++;
			}
		}
		/*
		 * Update PHY info for smhba
		 */
		if (mptsas_smhba_phy_init(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas phy update "
			    "failed");
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
		    &attached_devhdl);

		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_NUM_PHYS, numphys) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_NUM_PHYS);
			mptsas_log(mpt, CE_WARN, "mptsas update "
			    "num phys props failed");
			goto smp_create_done;
		}
		/*
		 * Add parent's props for SMHBA support
		 */
		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    SCSI_ADDR_PROP_ATTACHED_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas update iport"
			    "attached-port failed");
			goto smp_create_done;
		}

smp_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas unable to online "
			    "SMP target %s", wwn_str));
			ndi_prop_remove_all(*smp_dip);
			(void) ndi_devi_free(*smp_dip);
		}
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
15824
/*
 * SMP transport entry point.  Translate the framework's smp_pkt into an
 * MPI2 SMP passthrough request, execute it synchronously through
 * mptsas_do_passthru(), and map the resulting IOCStatus/SASStatus back
 * into an errno stored in smp_pkt_reason.  Returns DDI_SUCCESS only if
 * the passthrough, IOC status and SAS status all indicate success.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t			wwn;
	Mpi2SmpPassthroughRequest_t	req;
	Mpi2SmpPassthroughReply_t	rep;
	uint32_t			direction = 0;
	mptsas_t			*mpt;
	int				ret;
	uint64_t			tmp64;

	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is a 16-bit field; reject oversized requests. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * The -4 below presumably excludes the 4-byte SMP CRC from the
	 * data length — TODO confirm against the MPI2 specification.
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	/* Build the DMA direction flags from the req/rsp buffer sizes. */
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* Map the MPI2 IOC status to an errno for the caller. */
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
15909
15910 /*
15911 * If we didn't get a match, we need to get sas page0 for each device, and
15912 * untill we get a match. If failed, return NULL
15913 */
static mptsas_target_t *
mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
{
	int		i, j = 0;
	int		rval = 0;
	uint16_t	cur_handle;
	uint32_t	page_address;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * PHY named device must be direct attached and attaches to
	 * narrow port, if the iport is not parent of the device which
	 * we are looking for.
	 */
	/* j counts the bits set in phymask (number of phys in the port). */
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		if ((1 << i) & phymask)
			j++;
	}

	/* More than one phy means a wide port — no PHY-named device here. */
	if (j > 1)
		return (NULL);

	/*
	 * Must be a narrow port and single device attached to the narrow port
	 * So the physical port num of device which is equal to the iport's
	 * port num is the device what we are looking for.
	 */

	if (mpt->m_phy_info[phy].phy_mask != phymask)
		return (NULL);

	mutex_enter(&mpt->m_mutex);

	/* First try the cached targets: match by phy number, wwn == 0. */
	ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
	    &phy);
	if (ptgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (ptgt);
	}

	/* All device pages already traversed; nothing more to discover. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	/* Resume the firmware device-page walk from the saved handle. */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		ptgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &ptgt);
		/* Stop on page-read or allocation failure (ptgt is NULL). */
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}
		/* Skip entries that are not usable SAS/SATA end devices. */
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember progress so the next caller resumes from here. */
		mpt->m_dev_handle = cur_handle;

		/* A phy-addressed device has no WWN; match on phy number. */
		if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (ptgt);
}
15985
/*
 * The ptgt->m_addr.mta_wwn contains the wwid for each disk.
 * For RAID volumes, we need to check m_raidvol[x].m_raidwwid instead.
 * If we didn't get a match in the target hash, read SAS device page 0
 * for each device until we get a match.  If that fails, return NULL.
 */
static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *tmp_tgt = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	/* Fast path: the target may already be in the hash. */
	tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
	if (tmp_tgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	if (phymask == 0) {
		/*
		 * It's IR volume
		 */
		rval = mptsas_get_raid_info(mpt);
		if (rval) {
			/* RAID info refreshed; the volume may be hashed now. */
			tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
		}
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	/* Nothing left to discover once the full device walk is complete. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;
	for (;;) {
		tmp_tgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &tmp_tgt);
		/* Hard failure: give up and report no match. */
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			tmp_tgt = NULL;
			break;
		}
		/* Skip devices that are not exposed as SCSI targets. */
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		mpt->m_dev_handle = cur_handle;
		if ((tmp_tgt->m_addr.mta_wwn) &&
		    (tmp_tgt->m_addr.mta_wwn == wwid) &&
		    (tmp_tgt->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (tmp_tgt);
}
16056
/*
 * Map a (phymask, wwid) address to its SMP target (expander) structure,
 * walking the firmware's expander page 0 entries if the device is not
 * in the hash yet.  Returns NULL if no match can be found.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_smp_t smp_node, *psmp = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	/* Fast path: the SMP target may already be in the hash. */
	psmp = refhash_lookup(mpt->m_smp_targets, &addr);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* Nothing left to discover once the expander walk is complete. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_smp_devhdl;
	for (;;) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		/* End of the expander list (or page read failure). */
		if (rval != DDI_SUCCESS) {
			break;
		}
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		/* KM_SLEEP allocation inside: never returns NULL. */
		psmp = mptsas_smp_alloc(mpt, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
		    (psmp->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
16103
16104 mptsas_target_t *
16105 mptsas_tgt_alloc(mptsas_t *mpt, uint16_t devhdl, uint64_t wwid,
16106 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
16107 {
16108 mptsas_target_t *tmp_tgt = NULL;
16109 mptsas_target_addr_t addr;
16110
16111 addr.mta_wwn = wwid;
16112 addr.mta_phymask = phymask;
16113 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16114 if (tmp_tgt != NULL) {
16115 NDBG20(("Hash item already exist"));
16116 tmp_tgt->m_deviceinfo = devinfo;
16117 tmp_tgt->m_devhdl = devhdl; /* XXX - duplicate? */
16118 return (tmp_tgt);
16119 }
16120 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
16121 if (tmp_tgt == NULL) {
16122 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
16123 return (NULL);
16124 }
16125 tmp_tgt->m_devhdl = devhdl;
16126 tmp_tgt->m_addr.mta_wwn = wwid;
16127 tmp_tgt->m_deviceinfo = devinfo;
16128 tmp_tgt->m_addr.mta_phymask = phymask;
16129 tmp_tgt->m_phynum = phynum;
16130 /* Initialized the tgt structure */
16131 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
16132 tmp_tgt->m_qfull_retry_interval =
16133 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
16134 tmp_tgt->m_t_throttle = MAX_THROTTLE;
16135 TAILQ_INIT(&tmp_tgt->m_active_cmdq);
16136
16137 refhash_insert(mpt->m_targets, tmp_tgt);
16138
16139 return (tmp_tgt);
16140 }
16141
16142 static void
16143 mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
16144 {
16145 dst->m_devhdl = src->m_devhdl;
16146 dst->m_deviceinfo = src->m_deviceinfo;
16147 dst->m_pdevhdl = src->m_pdevhdl;
16148 dst->m_pdevinfo = src->m_pdevinfo;
16149 }
16150
16151 static mptsas_smp_t *
16152 mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
16153 {
16154 mptsas_target_addr_t addr;
16155 mptsas_smp_t *ret_data;
16156
16157 addr.mta_wwn = data->m_addr.mta_wwn;
16158 addr.mta_phymask = data->m_addr.mta_phymask;
16159 ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
16160 /*
16161 * If there's already a matching SMP target, update its fields
16162 * in place. Since the address is not changing, it's safe to do
16163 * this. We cannot just bcopy() here because the structure we've
16164 * been given has invalid hash links.
16165 */
16166 if (ret_data != NULL) {
16167 mptsas_smp_target_copy(data, ret_data);
16168 return (ret_data);
16169 }
16170
16171 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
16172 bcopy(data, ret_data, sizeof (mptsas_smp_t));
16173 refhash_insert(mpt->m_smp_targets, ret_data);
16174 return (ret_data);
16175 }
16176
16177 /*
16178 * Functions for SGPIO LED support
16179 */
/*
 * Translate a dev_t into its dev_info node and fetch the iport's
 * "phymask" property into *phymask (0 if the property is absent).
 * Returns NULL if no node matches the dev_t.
 *
 * NOTE(review): the devi hold is dropped before the dip is returned,
 * so the caller receives an unheld pointer — confirm callers only use
 * it while the device cannot detach.
 */
static dev_info_t *
mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
{
	dev_info_t *dip;
	int prop;
	dip = e_ddi_hold_devi_by_dev(dev, 0);
	if (dip == NULL)
		return (dip);
	prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "phymask", 0);
	*phymask = (mptsas_phymask_t)prop;
	ddi_release_devi(dip);
	return (dip);
}
16194 static mptsas_target_t *
16195 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16196 {
16197 uint8_t phynum;
16198 uint64_t wwn;
16199 int lun;
16200 mptsas_target_t *ptgt = NULL;
16201
16202 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16203 return (NULL);
16204 }
16205 if (addr[0] == 'w') {
16206 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16207 } else {
16208 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16209 }
16210 return (ptgt);
16211 }
16212
16213 static int
16214 mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt)
16215 {
16216 uint32_t slotstatus = 0;
16217
16218 /* Build an MPI2 Slot Status based on our view of the world */
16219 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16220 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16221 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16222 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16223 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16224 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
16225
16226 /* Write it to the controller */
16227 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16228 slotstatus, ptgt->m_slot_num));
16229 return (mptsas_send_sep(mpt, ptgt, &slotstatus,
16230 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16231 }
16232
/*
 * Send a SCSI Enclosure Processor request for a target, using
 * enclosure/slot addressing.
 *
 * "act" selects the MPI2 SEP action: for WRITE_STATUS the value at
 * *status is written to the slot; for any other action the slot status
 * returned by the IOC is stored through *status.  Returns 0 on success
 * or an errno value describing the failure.  Caller must hold m_mutex.
 */
static int
mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
    uint32_t *status, uint8_t act)
{
	Mpi2SepRequest_t	req;
	Mpi2SepReply_t		rep;
	int			ret;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * We only support SEP control of directly-attached targets, in which
	 * case the "SEP" we're talking to is a virtual one contained within
	 * the HBA itself. This is necessary because DA targets typically have
	 * no other mechanism for LED control. Targets for which a separate
	 * enclosure service processor exists should be controlled via ses(7d)
	 * or sgen(7d). Furthermore, since such requests can time out, they
	 * should be made in user context rather than in response to
	 * asynchronous fabric changes.
	 *
	 * In addition, we do not support this operation for RAID volumes,
	 * since there is no slot associated with them.
	 */
	if (!(ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED) ||
	    ptgt->m_addr.mta_phymask == 0) {
		return (ENOTTY);
	}

	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));

	/* Request/reply fields go to the IOC in little-endian byte order. */
	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	req.Action = act;
	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	req.EnclosureHandle = LE_16(ptgt->m_enclosure);
	req.Slot = LE_16(ptgt->m_slot_num);
	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		req.SlotStatus = LE_32(*status);
	}
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
	if (ret != 0) {
		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
		    "Processor Request message error %d", ret);
		return (ret);
	}
	/* do passthrough success, check the ioc status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
		    "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
		    LE_32(rep.IOCLogInfo));
		/* Map the MPI2 IOC status code onto an errno value. */
		switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_INVALID_FUNCTION:
		case MPI2_IOCSTATUS_INVALID_VPID:
		case MPI2_IOCSTATUS_INVALID_FIELD:
		case MPI2_IOCSTATUS_INVALID_STATE:
		case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
			return (EINVAL);
		case MPI2_IOCSTATUS_BUSY:
			return (EBUSY);
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
			return (EAGAIN);
		case MPI2_IOCSTATUS_INVALID_SGL:
		case MPI2_IOCSTATUS_INTERNAL_ERROR:
		case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		default:
			return (EIO);
		}
	}
	/* On a read action, hand the current slot status back to the caller. */
	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		*status = LE_32(rep.SlotStatus);
	}

	return (0);
}
16316
/*
 * Allocate and bind a consistent DMA buffer: create the DMA handle,
 * allocate the memory, and bind it, returning the DMA handle, access
 * handle, kernel virtual address, and (optionally) the first DMA cookie
 * through the out parameters.  Returns TRUE on success; on failure all
 * intermediate resources are released and FALSE is returned.
 *
 * NOTE(review): only the first cookie is returned and ncookie is not
 * checked — this assumes callers pass a dma_attr that yields a single
 * cookie (sgllen 1); confirm at call sites.
 */
int
mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
    ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
    uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
{
	ddi_dma_cookie_t new_cookie;
	size_t alloc_len;
	uint_t ncookie;

	/* Caller may not care about the cookie; bind into a scratch one. */
	if (cookiep == NULL)
		cookiep = &new_cookie;

	if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
	    NULL, dma_hdp) != DDI_SUCCESS) {
		return (FALSE);
	}

	if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
	    acc_hdp) != DDI_SUCCESS) {
		ddi_dma_free_handle(dma_hdp);
		return (FALSE);
	}

	if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
	    (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
	    cookiep, &ncookie) != DDI_DMA_MAPPED) {
		(void) ddi_dma_mem_free(acc_hdp);
		ddi_dma_free_handle(dma_hdp);
		return (FALSE);
	}

	return (TRUE);
}
16351
16352 void
16353 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
16354 {
16355 if (*dma_hdp == NULL)
16356 return;
16357
16358 (void) ddi_dma_unbind_handle(*dma_hdp);
16359 (void) ddi_dma_mem_free(acc_hdp);
16360 ddi_dma_free_handle(dma_hdp);
16361 }