1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2017, Joyent, Inc.
26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
28 */
29
30 /*
31 * Copyright (c) 2000 to 2010, LSI Corporation.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms of all code within
35 * this file that is exclusively owned by LSI, with or without
36 * modification, is permitted provided that, in addition to the CDDL 1.0
37 * License requirements, the following conditions are met:
38 *
39 * Neither the name of the author nor the names of its contributors may be
40 * used to endorse or promote products derived from this software without
41 * specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
46 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
47 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
49 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
50 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
51 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
52 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 * DAMAGE.
55 */
56
57 /*
58 * mptsas - This is a driver based on LSI Logic's MPT2.0/2.5 interface.
59 *
60 */
61
62 #if defined(lint) || defined(DEBUG)
63 #define MPTSAS_DEBUG
64 #endif
65
66 /*
67 * standard header files.
68 */
69 #include <sys/note.h>
70 #include <sys/scsi/scsi.h>
71 #include <sys/pci.h>
72 #include <sys/file.h>
73 #include <sys/policy.h>
74 #include <sys/model.h>
75 #include <sys/sysevent.h>
76 #include <sys/sysevent/eventdefs.h>
77 #include <sys/sysevent/dr.h>
78 #include <sys/sata/sata_defs.h>
79 #include <sys/sata/sata_hba.h>
80 #include <sys/scsi/generic/sas.h>
81 #include <sys/scsi/impl/scsi_sas.h>
82
83 #pragma pack(1)
84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
90 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
91 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
92 #pragma pack()
93
94 /*
95 * private header files.
96 *
97 */
98 #include <sys/scsi/impl/scsi_reset_notify.h>
99 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
100 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
101 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
102 #include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
103 #include <sys/raidioctl.h>
104
105 /*
106 * FMA header files
107 */
108 #include <sys/ddifm.h>
109 #include <sys/fm/protocol.h>
110 #include <sys/fm/util.h>
111 #include <sys/fm/io/ddi.h>
112
113 /*
114 * autoconfiguration data and routines.
115 */
116 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
117 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
118 static int mptsas_power(dev_info_t *dip, int component, int level);
119
120 /*
121 * cb_ops function
122 */
123 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
124 cred_t *credp, int *rval);
125 #ifdef __sparc
126 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
127 #else /* __sparc */
128 static int mptsas_quiesce(dev_info_t *devi);
129 #endif /* __sparc */
130
131 /*
 * Resource initialization for hardware
133 */
134 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
135 static void mptsas_disable_bus_master(mptsas_t *mpt);
136 static void mptsas_hba_fini(mptsas_t *mpt);
137 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
138 static int mptsas_hba_setup(mptsas_t *mpt);
139 static void mptsas_hba_teardown(mptsas_t *mpt);
140 static int mptsas_config_space_init(mptsas_t *mpt);
141 static void mptsas_config_space_fini(mptsas_t *mpt);
142 static void mptsas_iport_register(mptsas_t *mpt);
143 static int mptsas_smp_setup(mptsas_t *mpt);
144 static void mptsas_smp_teardown(mptsas_t *mpt);
145 static int mptsas_enc_setup(mptsas_t *mpt);
146 static void mptsas_enc_teardown(mptsas_t *mpt);
147 static int mptsas_cache_create(mptsas_t *mpt);
148 static void mptsas_cache_destroy(mptsas_t *mpt);
149 static int mptsas_alloc_request_frames(mptsas_t *mpt);
150 static int mptsas_alloc_sense_bufs(mptsas_t *mpt);
151 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
152 static int mptsas_alloc_free_queue(mptsas_t *mpt);
153 static int mptsas_alloc_post_queue(mptsas_t *mpt);
154 static void mptsas_alloc_reply_args(mptsas_t *mpt);
155 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
156 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
157 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
158 static void mptsas_update_hashtab(mptsas_t *mpt);
159
160 /*
161 * SCSA function prototypes
162 */
163 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
164 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
165 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
166 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
167 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
168 int tgtonly);
169 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
170 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
171 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
172 int tgtlen, int flags, int (*callback)(), caddr_t arg);
173 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
174 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
175 struct scsi_pkt *pkt);
176 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
177 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
178 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
179 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
180 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
181 void (*callback)(caddr_t), caddr_t arg);
182 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
183 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
184 static int mptsas_scsi_quiesce(dev_info_t *dip);
185 static int mptsas_scsi_unquiesce(dev_info_t *dip);
186 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
187 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
188
189 /*
190 * SMP functions
191 */
192 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
193
194 /*
195 * internal function prototypes.
196 */
197 static void mptsas_list_add(mptsas_t *mpt);
198 static void mptsas_list_del(mptsas_t *mpt);
199
200 static int mptsas_quiesce_bus(mptsas_t *mpt);
201 static int mptsas_unquiesce_bus(mptsas_t *mpt);
202
203 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
204 static void mptsas_free_handshake_msg(mptsas_t *mpt);
205
206 static void mptsas_ncmds_checkdrain(void *arg);
207
208 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
209 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
210 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
211 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
212
213 static int mptsas_do_detach(dev_info_t *dev);
214 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
215 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
216 struct scsi_pkt *pkt);
217 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
218
219 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
220 static void mptsas_handle_event(void *args);
221 static int mptsas_handle_event_sync(void *args);
222 static void mptsas_handle_dr(void *args);
223 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
224 dev_info_t *pdip);
225
226 static void mptsas_restart_cmd(void *);
227
228 static void mptsas_flush_hba(mptsas_t *mpt);
229 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
230 uint8_t tasktype);
231 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
232 uchar_t reason, uint_t stat);
233
234 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
235 static void mptsas_process_intr(mptsas_t *mpt,
236 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
237 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
238 pMpi2ReplyDescriptorsUnion_t reply_desc);
239 static void mptsas_handle_address_reply(mptsas_t *mpt,
240 pMpi2ReplyDescriptorsUnion_t reply_desc);
241 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
242 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
243 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
244
245 static void mptsas_watch(void *arg);
246 static void mptsas_watchsubr(mptsas_t *mpt);
247 static void mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt);
248
249 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
250 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
251 uint8_t *data, uint32_t request_size, uint32_t reply_size,
252 uint32_t data_size, uint32_t direction, uint8_t *dataout,
253 uint32_t dataout_size, short timeout, int mode);
254 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
255
256 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
257 uint32_t unique_id);
258 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
259 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
260 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
261 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
262 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
263 uint32_t diag_type);
264 static int mptsas_diag_register(mptsas_t *mpt,
265 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
266 static int mptsas_diag_unregister(mptsas_t *mpt,
267 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
268 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
269 uint32_t *return_code);
270 static int mptsas_diag_read_buffer(mptsas_t *mpt,
271 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
272 uint32_t *return_code, int ioctl_mode);
273 static int mptsas_diag_release(mptsas_t *mpt,
274 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
275 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
276 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
277 int ioctl_mode);
278 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
279 int mode);
280
281 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
282 int cmdlen, int tgtlen, int statuslen, int kf);
283 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
284
285 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
286 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
287
288 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
289 int kmflags);
290 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
291
292 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
293 mptsas_cmd_t *cmd);
294 static void mptsas_check_task_mgt(mptsas_t *mpt,
295 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
296 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
297 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
298 int *resid);
299
300 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
301 static void mptsas_free_active_slots(mptsas_t *mpt);
302 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
303
304 static void mptsas_restart_hba(mptsas_t *mpt);
305 static void mptsas_restart_waitq(mptsas_t *mpt);
306
307 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
308 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
309 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
310
311 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
312 static void mptsas_doneq_empty(mptsas_t *mpt);
313 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
314
315 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
316 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
317 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
318 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
319
320
321 static void mptsas_start_watch_reset_delay();
322 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
323 static void mptsas_watch_reset_delay(void *arg);
324 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
325
326 /*
327 * helper functions
328 */
329 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
330
331 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
332 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
333 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
334 int lun);
335 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
336 int lun);
337 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
338 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
339
340 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
341 int *lun);
342 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
343
344 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
345 mptsas_phymask_t phymask, uint8_t phy);
346 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
347 mptsas_phymask_t phymask, uint64_t wwid);
348 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
349 mptsas_phymask_t phymask, uint64_t wwid);
350
351 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
352 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
353
354 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
355 uint16_t *handle, mptsas_target_t **pptgt);
356 static void mptsas_update_phymask(mptsas_t *mpt);
357
358 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_enclosure_t *mep,
359 uint16_t idx);
360 static int mptsas_send_sep(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx,
361 uint32_t *status, uint8_t cmd);
362 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
363 mptsas_phymask_t *phymask);
364 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
365 mptsas_phymask_t phymask);
366
367
368 /*
369 * Enumeration / DR functions
370 */
371 static void mptsas_config_all(dev_info_t *pdip);
372 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
373 dev_info_t **lundip);
374 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
375 dev_info_t **lundip);
376
377 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
378 static int mptsas_offline_target(dev_info_t *pdip, char *name);
379
380 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
381 dev_info_t **dip);
382
383 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
384 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
385 dev_info_t **dip, mptsas_target_t *ptgt);
386
387 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
388 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
389
390 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
391 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
392 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
393 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
394 int lun);
395
396 static void mptsas_offline_missed_luns(dev_info_t *pdip,
397 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
398 static int mptsas_offline_lun(dev_info_t *rdip, mdi_pathinfo_t *rpip);
399
400 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
401 dev_info_t **smp_dip);
402 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node);
403
404 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
405 int mode, int *rval);
406 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
407 int mode, int *rval);
408 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
409 int mode, int *rval);
410 static void mptsas_record_event(void *args);
411 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
412 int mode);
413
414 mptsas_target_t *mptsas_tgt_alloc(refhash_t *, uint16_t, uint64_t,
415 uint32_t, mptsas_phymask_t, uint8_t);
416 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
417 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
418 dev_info_t **smp_dip);
419
420 /*
421 * Power management functions
422 */
423 static int mptsas_get_pci_cap(mptsas_t *mpt);
424 static int mptsas_init_pm(mptsas_t *mpt);
425
426 /*
427 * MPT MSI tunable:
428 *
429 * By default MSI is enabled on all supported platforms.
430 */
431 boolean_t mptsas_enable_msi = B_TRUE;
432 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
433
434 /*
435 * Global switch for use of MPI2.5 FAST PATH.
436 * We don't really know what FAST PATH actually does, so if it is suspected
437 * to cause problems it can be turned off by setting this variable to B_FALSE.
438 */
439 boolean_t mptsas_use_fastpath = B_TRUE;
440
441 static int mptsas_register_intrs(mptsas_t *);
442 static void mptsas_unregister_intrs(mptsas_t *);
443 static int mptsas_add_intrs(mptsas_t *, int);
444 static void mptsas_rem_intrs(mptsas_t *);
445
446 /*
447 * FMA Prototypes
448 */
449 static void mptsas_fm_init(mptsas_t *mpt);
450 static void mptsas_fm_fini(mptsas_t *mpt);
451 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
452
453 extern pri_t minclsyspri, maxclsyspri;
454
455 /*
456 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
457 * under this device that the paths to a physical device are created when
458 * MPxIO is used.
459 */
460 extern dev_info_t *scsi_vhci_dip;
461
462 /*
463 * Tunable timeout value for Inquiry VPD page 0x83
464 * By default the value is 30 seconds.
465 */
466 int mptsas_inq83_retry_timeout = 30;
467
468 /*
469 * This is used to allocate memory for message frame storage, not for
470 * data I/O DMA. All message frames must be stored in the first 4G of
471 * physical memory.
472 */
473 ddi_dma_attr_t mptsas_dma_attrs = {
474 DMA_ATTR_V0, /* attribute layout version */
475 0x0ull, /* address low - should be 0 (longlong) */
476 0xffffffffull, /* address high - 32-bit max range */
477 0x00ffffffull, /* count max - max DMA object size */
478 4, /* allocation alignment requirements */
479 0x78, /* burstsizes - binary encoded values */
480 1, /* minxfer - gran. of DMA engine */
481 0x00ffffffull, /* maxxfer - gran. of DMA engine */
482 0xffffffffull, /* max segment size (DMA boundary) */
483 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
484 512, /* granularity - device transfer size */
485 0 /* flags, set to 0 */
486 };
487
488 /*
489 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
490 * physical addresses are supported.)
491 */
492 ddi_dma_attr_t mptsas_dma_attrs64 = {
493 DMA_ATTR_V0, /* attribute layout version */
494 0x0ull, /* address low - should be 0 (longlong) */
495 0xffffffffffffffffull, /* address high - 64-bit max */
496 0x00ffffffull, /* count max - max DMA object size */
497 4, /* allocation alignment requirements */
498 0x78, /* burstsizes - binary encoded values */
499 1, /* minxfer - gran. of DMA engine */
500 0x00ffffffull, /* maxxfer - gran. of DMA engine */
501 0xffffffffull, /* max segment size (DMA boundary) */
502 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
503 512, /* granularity - device transfer size */
504 0 /* flags, set to 0 */
505 };
506
507 ddi_device_acc_attr_t mptsas_dev_attr = {
508 DDI_DEVICE_ATTR_V1,
509 DDI_STRUCTURE_LE_ACC,
510 DDI_STRICTORDER_ACC,
511 DDI_DEFAULT_ACC
512 };
513
514 static struct cb_ops mptsas_cb_ops = {
515 scsi_hba_open, /* open */
516 scsi_hba_close, /* close */
517 nodev, /* strategy */
518 nodev, /* print */
519 nodev, /* dump */
520 nodev, /* read */
521 nodev, /* write */
522 mptsas_ioctl, /* ioctl */
523 nodev, /* devmap */
524 nodev, /* mmap */
525 nodev, /* segmap */
526 nochpoll, /* chpoll */
527 ddi_prop_op, /* cb_prop_op */
528 NULL, /* streamtab */
529 D_MP, /* cb_flag */
530 CB_REV, /* rev */
531 nodev, /* aread */
532 nodev /* awrite */
533 };
534
535 static struct dev_ops mptsas_ops = {
536 DEVO_REV, /* devo_rev, */
537 0, /* refcnt */
538 ddi_no_info, /* info */
539 nulldev, /* identify */
540 nulldev, /* probe */
541 mptsas_attach, /* attach */
542 mptsas_detach, /* detach */
543 #ifdef __sparc
544 mptsas_reset,
545 #else
546 nodev, /* reset */
547 #endif /* __sparc */
548 &mptsas_cb_ops, /* driver operations */
549 NULL, /* bus operations */
550 mptsas_power, /* power management */
551 #ifdef __sparc
552 ddi_quiesce_not_needed
553 #else
554 mptsas_quiesce /* quiesce */
555 #endif /* __sparc */
556 };
557
558
559 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
560
561 static struct modldrv modldrv = {
562 &mod_driverops, /* Type of module. This one is a driver */
563 MPTSAS_MOD_STRING, /* Name of the module. */
564 &mptsas_ops, /* driver ops */
565 };
566
567 static struct modlinkage modlinkage = {
568 MODREV_1, &modldrv, NULL
569 };
570 #define TARGET_PROP "target"
571 #define LUN_PROP "lun"
572 #define LUN64_PROP "lun64"
573 #define SAS_PROP "sas-mpt"
574 #define MDI_GUID "wwn"
575 #define NDI_GUID "guid"
576 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
577
578 /*
579 * Local static data
580 */
581 #if defined(MPTSAS_DEBUG)
582 /*
583 * Flags to indicate which debug messages are to be printed and which go to the
584 * debug log ring buffer. Default is to not print anything, and to log
585 * everything except the watchsubr() output which normally happens every second.
586 */
587 uint32_t mptsas_debugprt_flags = 0x0;
588 uint32_t mptsas_debuglog_flags = ~(1U << 30);
589 #endif /* defined(MPTSAS_DEBUG) */
590 uint32_t mptsas_debug_resets = 0;
591
592 static kmutex_t mptsas_global_mutex;
593 static void *mptsas_state; /* soft state ptr */
594 static krwlock_t mptsas_global_rwlock;
595
596 static kmutex_t mptsas_log_mutex;
597 static char mptsas_log_buf[256];
598 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
599
600 static mptsas_t *mptsas_head, *mptsas_tail;
601 static clock_t mptsas_scsi_watchdog_tick;
602 static clock_t mptsas_tick;
603 static timeout_id_t mptsas_reset_watch;
604 static timeout_id_t mptsas_timeout_id;
605 static int mptsas_timeouts_enabled = 0;
606
607 /*
608 * Default length for extended auto request sense buffers.
609 * All sense buffers need to be under the same alloc because there
610 * is only one common top 32bits (of 64bits) address register.
611 * Most requests only require 32 bytes, but some request >256.
612 * We use rmalloc()/rmfree() on this additional memory to manage the
613 * "extended" requests.
614 */
615 int mptsas_extreq_sense_bufsize = 256*64;
616
617 /*
 * We believe that all software restrictions of having to run with DMA
619 * attributes to limit allocation to the first 4G are removed.
620 * However, this flag remains to enable quick switchback should suspicious
621 * problems emerge.
622 * Note that scsi_alloc_consistent_buf() does still adhere to allocating
623 * 32 bit addressable memory, but we can cope if that is changed now.
624 */
625 int mptsas_use_64bit_msgaddr = 1;
626
627 /*
628 * warlock directives
629 */
630 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
631 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
632 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
633 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
634 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
635 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
636
637 /*
638 * SM - HBA statics
639 */
640 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
641
642 #ifdef MPTSAS_DEBUG
643 void debug_enter(char *);
644 #endif
645
646 /*
647 * Notes:
648 * - scsi_hba_init(9F) initializes SCSI HBA modules
649 * - must call scsi_hba_fini(9F) if modload() fails
650 */
int
_init(void)
{
	int status;
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	NDBG0(("_init"));

	/*
	 * Pre-allocate the per-instance soft-state space before any
	 * attach can happen; must be torn down on every failure path.
	 */
	status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
	    MPTSAS_INITIAL_SOFT_SPACE);
	if (status != 0) {
		return (status);
	}

	/* Register with the SCSA framework; pairs with scsi_hba_fini(). */
	if ((status = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&mptsas_state);
		return (status);
	}

	/* Global locks protecting the driver instance list and log buffer. */
	mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
	mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);

	/*
	 * On mod_install() failure, unwind everything initialized above
	 * in reverse order (see the note above: scsi_hba_fini() must be
	 * called if modload fails).
	 */
	if ((status = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&mptsas_log_mutex);
		rw_destroy(&mptsas_global_rwlock);
		mutex_destroy(&mptsas_global_mutex);
		ddi_soft_state_fini(&mptsas_state);
		scsi_hba_fini(&modlinkage);
	}

	return (status);
}
685
686 /*
687 * Notes:
688 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
689 */
int
_fini(void)
{
	int status;
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	NDBG0(("_fini"));

	/*
	 * Only tear down global state if mod_remove() succeeds; on
	 * failure the module stays loaded and all resources must remain
	 * intact.
	 */
	if ((status = mod_remove(&modlinkage)) == 0) {
		ddi_soft_state_fini(&mptsas_state);
		scsi_hba_fini(&modlinkage);
		mutex_destroy(&mptsas_global_mutex);
		rw_destroy(&mptsas_global_rwlock);
		mutex_destroy(&mptsas_log_mutex);
	}
	return (status);
}
708
709 /*
710 * The loadable-module _info(9E) entry point
711 */
int
_info(struct modinfo *modinfop)
{
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);
	NDBG0(("mptsas _info"));

	/* Report module information via the generic mod_info() helper. */
	return (mod_info(&modlinkage, modinfop));
}
721
722 static int
723 mptsas_target_eval_devhdl(const void *op, void *arg)
724 {
725 uint16_t dh = *(uint16_t *)arg;
726 const mptsas_target_t *tp = op;
727
728 return ((int)tp->m_devhdl - (int)dh);
729 }
730
731 static int
732 mptsas_target_eval_nowwn(const void *op, void *arg)
733 {
734 uint8_t phy = *(uint8_t *)arg;
735 const mptsas_target_t *tp = op;
736
737 if (tp->m_addr.mta_wwn != 0)
738 return (-1);
739
740 return ((int)tp->m_phynum - (int)phy);
741 }
742
743 static int
744 mptsas_smp_eval_devhdl(const void *op, void *arg)
745 {
746 uint16_t dh = *(uint16_t *)arg;
747 const mptsas_smp_t *sp = op;
748
749 return ((int)sp->m_devhdl - (int)dh);
750 }
751
752 static uint64_t
753 mptsas_target_addr_hash(const void *tp)
754 {
755 const mptsas_target_addr_t *tap = tp;
756
757 return ((tap->mta_wwn & 0xffffffffffffULL) |
758 ((uint64_t)tap->mta_phymask << 48));
759 }
760
761 static int
762 mptsas_target_addr_cmp(const void *a, const void *b)
763 {
764 const mptsas_target_addr_t *aap = a;
765 const mptsas_target_addr_t *bap = b;
766
767 if (aap->mta_wwn < bap->mta_wwn)
768 return (-1);
769 if (aap->mta_wwn > bap->mta_wwn)
770 return (1);
771 return ((int)bap->mta_phymask - (int)aap->mta_phymask);
772 }
773
/*
 * refhash hash callback for the temporary-target hash: identity hash,
 * the object's own address is its key.
 */
static uint64_t
mptsas_tmp_target_hash(const void *tp)
{
	return ((uint64_t)(uintptr_t)tp);
}
779
/*
 * refhash comparison callback for the temporary-target hash: order by
 * object address (the identity key used by mptsas_tmp_target_hash()).
 *
 * Returns 1 if a > b, -1 if a < b, 0 if equal.
 *
 * Fixes: the original tested "b < a" for the -1 case, which is the
 * same condition as the already-handled "a > b", so -1 was unreachable
 * and a < b incorrectly compared as equal. Comparing through uintptr_t
 * also avoids undefined behavior from relational comparison of
 * pointers into unrelated objects.
 */
static int
mptsas_tmp_target_cmp(const void *a, const void *b)
{
	uintptr_t pa = (uintptr_t)a;
	uintptr_t pb = (uintptr_t)b;

	if (pa > pb)
		return (1);
	if (pa < pb)
		return (-1);

	return (0);
}
790
/* refhash destructor callback: release a target node's storage. */
static void
mptsas_target_free(void *op)
{
	kmem_free(op, sizeof (mptsas_target_t));
}
796
/* refhash destructor callback: release an SMP (expander) node's storage. */
static void
mptsas_smp_free(void *op)
{
	kmem_free(op, sizeof (mptsas_smp_t));
}
802
803 static void
804 mptsas_destroy_hashes(mptsas_t *mpt)
805 {
806 mptsas_target_t *tp;
807 mptsas_smp_t *sp;
808
809 for (tp = refhash_first(mpt->m_targets); tp != NULL;
810 tp = refhash_next(mpt->m_targets, tp)) {
811 refhash_remove(mpt->m_targets, tp);
812 }
813 for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
814 sp = refhash_next(mpt->m_smp_targets, sp)) {
815 refhash_remove(mpt->m_smp_targets, sp);
816 }
817 refhash_destroy(mpt->m_tmp_targets);
818 refhash_destroy(mpt->m_targets);
819 refhash_destroy(mpt->m_smp_targets);
820 mpt->m_targets = NULL;
821 mpt->m_smp_targets = NULL;
822 }
823
824 static int
825 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
826 {
827 dev_info_t *pdip;
828 mptsas_t *mpt;
829 scsi_hba_tran_t *hba_tran;
830 char *iport = NULL;
831 char phymask[MPTSAS_MAX_PHYS];
832 mptsas_phymask_t phy_mask = 0;
833 int dynamic_port = 0;
834 uint32_t page_address;
835 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
836 int rval = DDI_FAILURE;
837 int i = 0;
838 uint8_t numphys = 0;
839 uint8_t phy_id;
840 uint8_t phy_port = 0;
841 uint16_t attached_devhdl = 0;
842 uint32_t dev_info;
843 uint64_t attached_sas_wwn;
844 uint16_t dev_hdl;
845 uint16_t pdev_hdl;
846 uint16_t bay_num, enclosure, io_flags;
847 char attached_wwnstr[MPTSAS_WWN_STRLEN];
848
849 /* CONSTCOND */
850 ASSERT(NO_COMPETING_THREADS);
851
852 switch (cmd) {
853 case DDI_ATTACH:
854 break;
855
856 case DDI_RESUME:
857 /*
858 * If this a scsi-iport node, nothing to do here.
859 */
860 return (DDI_SUCCESS);
861
862 default:
863 return (DDI_FAILURE);
864 }
865
866 pdip = ddi_get_parent(dip);
867
868 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
869 NULL) {
870 cmn_err(CE_WARN, "Failed attach iport because fail to "
871 "get tran vector for the HBA node");
872 return (DDI_FAILURE);
873 }
874
875 mpt = TRAN2MPT(hba_tran);
876 ASSERT(mpt != NULL);
877 if (mpt == NULL)
878 return (DDI_FAILURE);
879
880 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
881 NULL) {
882 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
883 "get tran vector for the iport node");
884 return (DDI_FAILURE);
885 }
886
887 /*
888 * Overwrite parent's tran_hba_private to iport's tran vector
889 */
890 hba_tran->tran_hba_private = mpt;
891
892 ddi_report_dev(dip);
893
894 /*
895 * Get SAS address for initiator port according dev_handle
896 */
897 iport = ddi_get_name_addr(dip);
898 if (iport && strncmp(iport, "v0", 2) == 0) {
899 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
900 MPTSAS_VIRTUAL_PORT, 1) !=
901 DDI_PROP_SUCCESS) {
902 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
903 MPTSAS_VIRTUAL_PORT);
904 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
905 "prop update failed");
906 return (DDI_FAILURE);
907 }
908 return (DDI_SUCCESS);
909 }
910
911 mutex_enter(&mpt->m_mutex);
912 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
913 bzero(phymask, sizeof (phymask));
914 (void) sprintf(phymask,
915 "%x", mpt->m_phy_info[i].phy_mask);
916 if (strcmp(phymask, iport) == 0) {
917 break;
918 }
919 }
920
921 if (i == MPTSAS_MAX_PHYS) {
922 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
923 "seems not exist", iport);
924 mutex_exit(&mpt->m_mutex);
925 return (DDI_FAILURE);
926 }
927
928 phy_mask = mpt->m_phy_info[i].phy_mask;
929
930 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
931 dynamic_port = 1;
932 else
933 dynamic_port = 0;
934
935 /*
936 * Update PHY info for smhba
937 */
938 if (mptsas_smhba_phy_init(mpt)) {
939 mutex_exit(&mpt->m_mutex);
940 mptsas_log(mpt, CE_WARN, "mptsas phy update "
941 "failed");
942 return (DDI_FAILURE);
943 }
944
945 mutex_exit(&mpt->m_mutex);
946
947 numphys = 0;
948 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
949 if ((phy_mask >> i) & 0x01) {
950 numphys++;
951 }
952 }
953
954 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
955 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
956 mpt->un.m_base_wwid);
957
958 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
959 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
960 DDI_PROP_SUCCESS) {
961 (void) ddi_prop_remove(DDI_DEV_T_NONE,
962 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
963 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
964 "prop update failed");
965 return (DDI_FAILURE);
966 }
967 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
968 MPTSAS_NUM_PHYS, numphys) !=
969 DDI_PROP_SUCCESS) {
970 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
971 return (DDI_FAILURE);
972 }
973
974 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
975 "phymask", phy_mask) !=
976 DDI_PROP_SUCCESS) {
977 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
978 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
979 "prop update failed");
980 return (DDI_FAILURE);
981 }
982
983 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
984 "dynamic-port", dynamic_port) !=
985 DDI_PROP_SUCCESS) {
986 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
987 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
988 "prop update failed");
989 return (DDI_FAILURE);
990 }
991 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
992 MPTSAS_VIRTUAL_PORT, 0) !=
993 DDI_PROP_SUCCESS) {
994 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
995 MPTSAS_VIRTUAL_PORT);
996 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
997 "prop update failed");
998 return (DDI_FAILURE);
999 }
1000 mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
1001 &attached_devhdl);
1002
1003 mutex_enter(&mpt->m_mutex);
1004 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
1005 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
1006 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
1007 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
1008 &pdev_hdl, &bay_num, &enclosure, &io_flags);
1009 if (rval != DDI_SUCCESS) {
1010 mptsas_log(mpt, CE_WARN,
1011 "Failed to get device page0 for handle:%d",
1012 attached_devhdl);
1013 mutex_exit(&mpt->m_mutex);
1014 return (DDI_FAILURE);
1015 }
1016
1017 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1018 bzero(phymask, sizeof (phymask));
1019 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
1020 if (strcmp(phymask, iport) == 0) {
1021 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
1022 "%x",
1023 mpt->m_phy_info[i].phy_mask);
1024 }
1025 }
1026 mutex_exit(&mpt->m_mutex);
1027
1028 bzero(attached_wwnstr, sizeof (attached_wwnstr));
1029 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
1030 attached_sas_wwn);
1031 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
1032 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
1033 DDI_PROP_SUCCESS) {
1034 (void) ddi_prop_remove(DDI_DEV_T_NONE,
1035 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
1036 return (DDI_FAILURE);
1037 }
1038
1039 /* Create kstats for each phy on this iport */
1040
1041 mptsas_create_phy_stats(mpt, iport, dip);
1042
1043 /*
1044 * register sas hba iport with mdi (MPxIO/vhci)
1045 */
1046 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
1047 dip, 0) == MDI_SUCCESS) {
1048 mpt->m_mpxio_enable = TRUE;
1049 }
1050 return (DDI_SUCCESS);
1051 }
1052
1053 /*
1054 * Notes:
1055 * Set up all device state and allocate data structures,
1056 * mutexes, condition variables, etc. for device operation.
1057 * Add interrupts needed.
1058 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1059 */
1060 static int
1061 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1062 {
1063 mptsas_t *mpt = NULL;
1064 int instance, i, j;
1065 int doneq_thread_num;
1066 char intr_added = 0;
1067 char map_setup = 0;
1068 char config_setup = 0;
1069 char hba_attach_setup = 0;
1070 char smp_attach_setup = 0;
1071 char enc_attach_setup = 0;
1072 char mutex_init_done = 0;
1073 char event_taskq_create = 0;
1074 char dr_taskq_create = 0;
1075 char doneq_thread_create = 0;
1076 char added_watchdog = 0;
1077 scsi_hba_tran_t *hba_tran;
1078 uint_t mem_bar = MEM_SPACE;
1079 int rval = DDI_FAILURE;
1080
1081 /* CONSTCOND */
1082 ASSERT(NO_COMPETING_THREADS);
1083
1084 if (scsi_hba_iport_unit_address(dip)) {
1085 return (mptsas_iport_attach(dip, cmd));
1086 }
1087
1088 switch (cmd) {
1089 case DDI_ATTACH:
1090 break;
1091
1092 case DDI_RESUME:
1093 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
1094 return (DDI_FAILURE);
1095
1096 mpt = TRAN2MPT(hba_tran);
1097
1098 if (!mpt) {
1099 return (DDI_FAILURE);
1100 }
1101
1102 /*
1103 * Reset hardware and softc to "no outstanding commands"
1104 * Note that a check condition can result on first command
1105 * to a target.
1106 */
1107 mutex_enter(&mpt->m_mutex);
1108
1109 /*
1110 * raise power.
1111 */
1112 if (mpt->m_options & MPTSAS_OPT_PM) {
1113 mutex_exit(&mpt->m_mutex);
1114 (void) pm_busy_component(dip, 0);
1115 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
1116 if (rval == DDI_SUCCESS) {
1117 mutex_enter(&mpt->m_mutex);
1118 } else {
1119 /*
1120 * The pm_raise_power() call above failed,
1121 * and that can only occur if we were unable
1122 * to reset the hardware. This is probably
1123 * due to unhealty hardware, and because
1124 * important filesystems(such as the root
1125 * filesystem) could be on the attached disks,
1126 * it would not be a good idea to continue,
1127 * as we won't be entirely certain we are
1128 * writing correct data. So we panic() here
1129 * to not only prevent possible data corruption,
1130 * but to give developers or end users a hope
1131 * of identifying and correcting any problems.
1132 */
1133 fm_panic("mptsas could not reset hardware "
1134 "during resume");
1135 }
1136 }
1137
1138 mpt->m_suspended = 0;
1139
1140 /*
1141 * Reinitialize ioc
1142 */
1143 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1144 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1145 mutex_exit(&mpt->m_mutex);
1146 if (mpt->m_options & MPTSAS_OPT_PM) {
1147 (void) pm_idle_component(dip, 0);
1148 }
1149 fm_panic("mptsas init chip fail during resume");
1150 }
1151 /*
1152 * mptsas_update_driver_data needs interrupts so enable them
1153 * first.
1154 */
1155 MPTSAS_ENABLE_INTR(mpt);
1156 mptsas_update_driver_data(mpt);
1157
1158 /* start requests, if possible */
1159 mptsas_restart_hba(mpt);
1160
1161 mutex_exit(&mpt->m_mutex);
1162
1163 /*
1164 * Restart watch thread
1165 */
1166 mutex_enter(&mptsas_global_mutex);
1167 if (mptsas_timeout_id == 0) {
1168 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1169 mptsas_tick);
1170 mptsas_timeouts_enabled = 1;
1171 }
1172 mutex_exit(&mptsas_global_mutex);
1173
1174 /* report idle status to pm framework */
1175 if (mpt->m_options & MPTSAS_OPT_PM) {
1176 (void) pm_idle_component(dip, 0);
1177 }
1178
1179 return (DDI_SUCCESS);
1180
1181 default:
1182 return (DDI_FAILURE);
1183
1184 }
1185
1186 instance = ddi_get_instance(dip);
1187
1188 /*
1189 * Allocate softc information.
1190 */
1191 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1192 mptsas_log(NULL, CE_WARN,
1193 "mptsas%d: cannot allocate soft state", instance);
1194 goto fail;
1195 }
1196
1197 mpt = ddi_get_soft_state(mptsas_state, instance);
1198
1199 if (mpt == NULL) {
1200 mptsas_log(NULL, CE_WARN,
1201 "mptsas%d: cannot get soft state", instance);
1202 goto fail;
1203 }
1204
1205 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1206 scsi_size_clean(dip);
1207
1208 mpt->m_dip = dip;
1209 mpt->m_instance = instance;
1210
1211 /* Make a per-instance copy of the structures */
1212 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1213 if (mptsas_use_64bit_msgaddr) {
1214 mpt->m_msg_dma_attr = mptsas_dma_attrs64;
1215 } else {
1216 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1217 }
1218 mpt->m_reg_acc_attr = mptsas_dev_attr;
1219 mpt->m_dev_acc_attr = mptsas_dev_attr;
1220
1221 /*
1222 * Size of individual request sense buffer
1223 */
1224 mpt->m_req_sense_size = EXTCMDS_STATUS_SIZE;
1225
1226 /*
1227 * Initialize FMA
1228 */
1229 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1230 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1231 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1232 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1233
1234 mptsas_fm_init(mpt);
1235
1236 if (mptsas_alloc_handshake_msg(mpt,
1237 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1238 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1239 goto fail;
1240 }
1241
1242 /*
1243 * Setup configuration space
1244 */
1245 if (mptsas_config_space_init(mpt) == FALSE) {
1246 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1247 goto fail;
1248 }
1249 config_setup++;
1250
1251 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1252 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1253 mptsas_log(mpt, CE_WARN, "map setup failed");
1254 goto fail;
1255 }
1256 map_setup++;
1257
1258 /*
1259 * A taskq is created for dealing with the event handler
1260 */
1261 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1262 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1263 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1264 goto fail;
1265 }
1266 event_taskq_create++;
1267
1268 /*
1269 * A taskq is created for dealing with dr events
1270 */
1271 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1272 "mptsas_dr_taskq",
1273 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1274 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1275 "failed");
1276 goto fail;
1277 }
1278 dr_taskq_create++;
1279
1280 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1281 0, "mptsas_doneq_thread_threshold_prop", 10);
1282 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1283 0, "mptsas_doneq_length_threshold_prop", 8);
1284 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1285 0, "mptsas_doneq_thread_n_prop", 8);
1286
1287 if (mpt->m_doneq_thread_n) {
1288 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1289 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1290
1291 mutex_enter(&mpt->m_doneq_mutex);
1292 mpt->m_doneq_thread_id =
1293 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1294 * mpt->m_doneq_thread_n, KM_SLEEP);
1295
1296 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1297 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1298 CV_DRIVER, NULL);
1299 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1300 MUTEX_DRIVER, NULL);
1301 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1302 mpt->m_doneq_thread_id[j].flag |=
1303 MPTSAS_DONEQ_THREAD_ACTIVE;
1304 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1305 mpt->m_doneq_thread_id[j].arg.t = j;
1306 mpt->m_doneq_thread_id[j].threadp =
1307 thread_create(NULL, 0, mptsas_doneq_thread,
1308 &mpt->m_doneq_thread_id[j].arg,
1309 0, &p0, TS_RUN, minclsyspri);
1310 mpt->m_doneq_thread_id[j].donetail =
1311 &mpt->m_doneq_thread_id[j].doneq;
1312 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1313 }
1314 mutex_exit(&mpt->m_doneq_mutex);
1315 doneq_thread_create++;
1316 }
1317
1318 /*
1319 * Disable hardware interrupt since we're not ready to
1320 * handle it yet.
1321 */
1322 MPTSAS_DISABLE_INTR(mpt);
1323 if (mptsas_register_intrs(mpt) == FALSE)
1324 goto fail;
1325 intr_added++;
1326
1327 /* Initialize mutex used in interrupt handler */
1328 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1329 DDI_INTR_PRI(mpt->m_intr_pri));
1330 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1331 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1332 DDI_INTR_PRI(mpt->m_intr_pri));
1333 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1334 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1335 NULL, MUTEX_DRIVER,
1336 DDI_INTR_PRI(mpt->m_intr_pri));
1337 }
1338
1339 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1340 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1341 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1342 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1343 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1344 cv_init(&mpt->m_extreq_sense_refcount_cv, NULL, CV_DRIVER, NULL);
1345 mutex_init_done++;
1346
1347 mutex_enter(&mpt->m_mutex);
1348 /*
1349 * Initialize power management component
1350 */
1351 if (mpt->m_options & MPTSAS_OPT_PM) {
1352 if (mptsas_init_pm(mpt)) {
1353 mutex_exit(&mpt->m_mutex);
1354 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1355 "failed");
1356 goto fail;
1357 }
1358 }
1359
1360 /*
1361 * Initialize chip using Message Unit Reset, if allowed
1362 */
1363 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1364 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1365 mutex_exit(&mpt->m_mutex);
1366 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1367 goto fail;
1368 }
1369
1370 mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
1371 mptsas_target_addr_hash, mptsas_target_addr_cmp,
1372 mptsas_target_free, sizeof (mptsas_target_t),
1373 offsetof(mptsas_target_t, m_link),
1374 offsetof(mptsas_target_t, m_addr), KM_SLEEP);
1375
1376 /*
1377 * The refhash for temporary targets uses the address of the target
1378 * struct itself as tag, so the tag offset is 0. See the implementation
1379 * of mptsas_tmp_target_hash() and mptsas_tmp_target_cmp().
1380 */
1381 mpt->m_tmp_targets = refhash_create(MPTSAS_TMP_TARGET_BUCKET_COUNT,
1382 mptsas_tmp_target_hash, mptsas_tmp_target_cmp,
1383 mptsas_target_free, sizeof (mptsas_target_t),
1384 offsetof(mptsas_target_t, m_link), 0, KM_SLEEP);
1385
1386 /*
1387 * Fill in the phy_info structure and get the base WWID
1388 */
1389 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1390 mptsas_log(mpt, CE_WARN,
1391 "mptsas_get_manufacture_page5 failed!");
1392 goto fail;
1393 }
1394
1395 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1396 mptsas_log(mpt, CE_WARN,
1397 "mptsas_get_sas_io_unit_page_hndshk failed!");
1398 goto fail;
1399 }
1400
1401 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1402 mptsas_log(mpt, CE_WARN,
1403 "mptsas_get_manufacture_page0 failed!");
1404 goto fail;
1405 }
1406
1407 mutex_exit(&mpt->m_mutex);
1408
1409 /*
1410 * Register the iport for multiple port HBA
1411 */
1412 mptsas_iport_register(mpt);
1413
1414 /*
1415 * initialize SCSI HBA transport structure
1416 */
1417 if (mptsas_hba_setup(mpt) == FALSE)
1418 goto fail;
1419 hba_attach_setup++;
1420
1421 if (mptsas_smp_setup(mpt) == FALSE)
1422 goto fail;
1423 smp_attach_setup++;
1424
1425 if (mptsas_enc_setup(mpt) == FALSE)
1426 goto fail;
1427 enc_attach_setup++;
1428
1429 if (mptsas_cache_create(mpt) == FALSE)
1430 goto fail;
1431
1432 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1433 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1434 if (mpt->m_scsi_reset_delay == 0) {
1435 mptsas_log(mpt, CE_NOTE,
1436 "scsi_reset_delay of 0 is not recommended,"
1437 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1438 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1439 }
1440
1441 /*
1442 * Initialize the wait and done FIFO queue
1443 */
1444 mpt->m_donetail = &mpt->m_doneq;
1445 mpt->m_waitqtail = &mpt->m_waitq;
1446 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1447 mpt->m_tx_draining = 0;
1448
1449 /*
1450 * ioc cmd queue initialize
1451 */
1452 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1453 mpt->m_dev_handle = 0xFFFF;
1454
1455 MPTSAS_ENABLE_INTR(mpt);
1456
1457 /*
1458 * enable event notification
1459 */
1460 mutex_enter(&mpt->m_mutex);
1461 if (mptsas_ioc_enable_event_notification(mpt)) {
1462 mutex_exit(&mpt->m_mutex);
1463 goto fail;
1464 }
1465 mutex_exit(&mpt->m_mutex);
1466
1467 /*
1468 * used for mptsas_watch
1469 */
1470 mptsas_list_add(mpt);
1471
1472 mutex_enter(&mptsas_global_mutex);
1473 if (mptsas_timeouts_enabled == 0) {
1474 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1475 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1476
1477 mptsas_tick = mptsas_scsi_watchdog_tick *
1478 drv_usectohz((clock_t)1000000);
1479
1480 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1481 mptsas_timeouts_enabled = 1;
1482 }
1483 mutex_exit(&mptsas_global_mutex);
1484 added_watchdog++;
1485
1486 /*
1487 * Initialize PHY info for smhba.
1488 * This requires watchdog to be enabled otherwise if interrupts
1489 * don't work the system will hang.
1490 */
1491 if (mptsas_smhba_setup(mpt)) {
1492 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1493 "failed");
1494 goto fail;
1495 }
1496
1497 /* Check all dma handles allocated in attach */
1498 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1499 != DDI_SUCCESS) ||
1500 (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl)
1501 != DDI_SUCCESS) ||
1502 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1503 != DDI_SUCCESS) ||
1504 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1505 != DDI_SUCCESS) ||
1506 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1507 != DDI_SUCCESS) ||
1508 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1509 != DDI_SUCCESS)) {
1510 goto fail;
1511 }
1512
1513 /* Check all acc handles allocated in attach */
1514 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1515 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1516 != DDI_SUCCESS) ||
1517 (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl)
1518 != DDI_SUCCESS) ||
1519 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1520 != DDI_SUCCESS) ||
1521 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1522 != DDI_SUCCESS) ||
1523 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1524 != DDI_SUCCESS) ||
1525 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1526 != DDI_SUCCESS) ||
1527 (mptsas_check_acc_handle(mpt->m_config_handle)
1528 != DDI_SUCCESS)) {
1529 goto fail;
1530 }
1531
1532 /*
1533 * After this point, we are not going to fail the attach.
1534 */
1535
1536 /* Print message of HBA present */
1537 ddi_report_dev(dip);
1538
1539 /* report idle status to pm framework */
1540 if (mpt->m_options & MPTSAS_OPT_PM) {
1541 (void) pm_idle_component(dip, 0);
1542 }
1543
1544 return (DDI_SUCCESS);
1545
1546 fail:
1547 mptsas_log(mpt, CE_WARN, "attach failed");
1548 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1549 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1550 if (mpt) {
1551 /* deallocate in reverse order */
1552 if (added_watchdog) {
1553 mptsas_list_del(mpt);
1554 mutex_enter(&mptsas_global_mutex);
1555
1556 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1557 timeout_id_t tid = mptsas_timeout_id;
1558 mptsas_timeouts_enabled = 0;
1559 mptsas_timeout_id = 0;
1560 mutex_exit(&mptsas_global_mutex);
1561 (void) untimeout(tid);
1562 mutex_enter(&mptsas_global_mutex);
1563 }
1564 mutex_exit(&mptsas_global_mutex);
1565 }
1566
1567 mptsas_cache_destroy(mpt);
1568
1569 if (smp_attach_setup) {
1570 mptsas_smp_teardown(mpt);
1571 }
1572 if (enc_attach_setup) {
1573 mptsas_enc_teardown(mpt);
1574 }
1575 if (hba_attach_setup) {
1576 mptsas_hba_teardown(mpt);
1577 }
1578
1579 if (mpt->m_tmp_targets)
1580 refhash_destroy(mpt->m_tmp_targets);
1581 if (mpt->m_targets)
1582 refhash_destroy(mpt->m_targets);
1583 if (mpt->m_smp_targets)
1584 refhash_destroy(mpt->m_smp_targets);
1585
1586 if (mpt->m_active) {
1587 mptsas_free_active_slots(mpt);
1588 }
1589 if (intr_added) {
1590 mptsas_unregister_intrs(mpt);
1591 }
1592
1593 if (doneq_thread_create) {
1594 mutex_enter(&mpt->m_doneq_mutex);
1595 doneq_thread_num = mpt->m_doneq_thread_n;
1596 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1597 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1598 mpt->m_doneq_thread_id[j].flag &=
1599 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1600 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1601 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1602 }
1603 while (mpt->m_doneq_thread_n) {
1604 cv_wait(&mpt->m_doneq_thread_cv,
1605 &mpt->m_doneq_mutex);
1606 }
1607 for (j = 0; j < doneq_thread_num; j++) {
1608 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1609 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1610 }
1611 kmem_free(mpt->m_doneq_thread_id,
1612 sizeof (mptsas_doneq_thread_list_t)
1613 * doneq_thread_num);
1614 mutex_exit(&mpt->m_doneq_mutex);
1615 cv_destroy(&mpt->m_doneq_thread_cv);
1616 mutex_destroy(&mpt->m_doneq_mutex);
1617 }
1618 if (event_taskq_create) {
1619 ddi_taskq_destroy(mpt->m_event_taskq);
1620 }
1621 if (dr_taskq_create) {
1622 ddi_taskq_destroy(mpt->m_dr_taskq);
1623 }
1624 if (mutex_init_done) {
1625 mutex_destroy(&mpt->m_tx_waitq_mutex);
1626 mutex_destroy(&mpt->m_passthru_mutex);
1627 mutex_destroy(&mpt->m_mutex);
1628 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1629 mutex_destroy(
1630 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1631 }
1632 cv_destroy(&mpt->m_cv);
1633 cv_destroy(&mpt->m_passthru_cv);
1634 cv_destroy(&mpt->m_fw_cv);
1635 cv_destroy(&mpt->m_config_cv);
1636 cv_destroy(&mpt->m_fw_diag_cv);
1637 cv_destroy(&mpt->m_extreq_sense_refcount_cv);
1638 }
1639
1640 if (map_setup) {
1641 mptsas_cfg_fini(mpt);
1642 }
1643 if (config_setup) {
1644 mptsas_config_space_fini(mpt);
1645 }
1646 mptsas_free_handshake_msg(mpt);
1647 mptsas_hba_fini(mpt);
1648
1649 mptsas_fm_fini(mpt);
1650 ddi_soft_state_free(mptsas_state, instance);
1651 ddi_prop_remove_all(dip);
1652 }
1653 return (DDI_FAILURE);
1654 }
1655
/*
 * DDI_SUSPEND handler (called from mptsas_detach).
 *
 * Quiesces one mpt instance: cancels its per-instance timeouts, and if
 * every registered instance is now suspended, cancels the global watch
 * and reset-watch timeouts as well.  Finally, if the device is at full
 * power (or PM is not enabled), disables interrupts, syncs IR via a RAID
 * shutdown action, and drains both taskqs.  Always returns DDI_SUCCESS.
 *
 * Locking note: every untimeout() below is done with the relevant mutex
 * dropped, because the timeout callback may itself need that mutex;
 * the drop/untimeout/reacquire pattern must be preserved exactly.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* iport nodes carry no per-instance state to suspend. */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* m_suspended is a counter: only the first suspend does the work. */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	/* g == NULL here means no instance is still active. */
	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1762
1763 #ifdef __sparc
1764 /*ARGSUSED*/
1765 static int
1766 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1767 {
1768 mptsas_t *mpt;
1769 scsi_hba_tran_t *tran;
1770
1771 /*
1772 * If this call is for iport, just return.
1773 */
1774 if (scsi_hba_iport_unit_address(devi))
1775 return (DDI_SUCCESS);
1776
1777 if ((tran = ddi_get_driver_private(devi)) == NULL)
1778 return (DDI_SUCCESS);
1779
1780 if ((mpt = TRAN2MPT(tran)) == NULL)
1781 return (DDI_SUCCESS);
1782
1783 /*
1784 * Send RAID action system shutdown to sync IR. Disable HBA
1785 * interrupts in hardware first.
1786 */
1787 MPTSAS_DISABLE_INTR(mpt);
1788 mptsas_raid_action_system_shutdown(mpt);
1789
1790 return (DDI_SUCCESS);
1791 }
1792 #else /* __sparc */
1793 /*
1794 * quiesce(9E) entry point.
1795 *
1796 * This function is called when the system is single-threaded at high
1797 * PIL with preemption disabled. Therefore, this function must not be
1798 * blocked.
1799 *
1800 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1801 * DDI_FAILURE indicates an error condition and should almost never happen.
1802 */
1803 static int
1804 mptsas_quiesce(dev_info_t *devi)
1805 {
1806 mptsas_t *mpt;
1807 scsi_hba_tran_t *tran;
1808
1809 /*
1810 * If this call is for iport, just return.
1811 */
1812 if (scsi_hba_iport_unit_address(devi))
1813 return (DDI_SUCCESS);
1814
1815 if ((tran = ddi_get_driver_private(devi)) == NULL)
1816 return (DDI_SUCCESS);
1817
1818 if ((mpt = TRAN2MPT(tran)) == NULL)
1819 return (DDI_SUCCESS);
1820
1821 /* Disable HBA interrupts in hardware */
1822 MPTSAS_DISABLE_INTR(mpt);
1823 /* Send RAID action system shutdonw to sync IR */
1824 mptsas_raid_action_system_shutdown(mpt);
1825
1826 return (DDI_SUCCESS);
1827 }
1828 #endif /* __sparc */
1829
1830 /*
1831 * detach(9E). Remove all device allocations and system resources;
1832 * disable device interrupts.
1833 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1834 */
1835 static int
1836 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1837 {
1838 /* CONSTCOND */
1839 ASSERT(NO_COMPETING_THREADS);
1840 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1841
1842 switch (cmd) {
1843 case DDI_DETACH:
1844 return (mptsas_do_detach(devi));
1845
1846 case DDI_SUSPEND:
1847 return (mptsas_suspend(devi));
1848
1849 default:
1850 return (DDI_FAILURE);
1851 }
1852 /* NOTREACHED */
1853 }
1854
/*
 * Worker for DDI_DETACH.
 *
 * For an iport node: refuses to detach while MPxIO path-info children
 * remain, otherwise unregisters the pHCI and removes its properties.
 *
 * For the HBA node: raises power to D0 if needed, syncs IR and resets
 * the IOC to stop HBA-initiated DMA, then tears down interrupts, taskqs,
 * doneq worker threads, timeouts, kstats, hashes, caches, locks and all
 * other resources acquired in mptsas_attach() (in roughly reverse order)
 * before freeing the soft state.
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* Free all outstanding path infos first. */
			while ((pip = mdi_get_next_client_path(dip, NULL)) !=
			    NULL) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				/* A path is still busy: fail the detach. */
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR. After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/*
	 * Shut down the doneq worker threads: clear each thread's ACTIVE
	 * flag, wake it, then wait for the live-thread count to reach zero
	 * before destroying the per-thread CVs/mutexes and the array.
	 */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	mptsas_destroy_hashes(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);
	cv_destroy(&mpt->m_extreq_sense_refcount_cv);

	mptsas_smp_teardown(mpt);
	mptsas_enc_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
2072
2073 static void
2074 mptsas_list_add(mptsas_t *mpt)
2075 {
2076 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2077
2078 if (mptsas_head == NULL) {
2079 mptsas_head = mpt;
2080 } else {
2081 mptsas_tail->m_next = mpt;
2082 }
2083 mptsas_tail = mpt;
2084 rw_exit(&mptsas_global_rwlock);
2085 }
2086
2087 static void
2088 mptsas_list_del(mptsas_t *mpt)
2089 {
2090 mptsas_t *m;
2091 /*
2092 * Remove device instance from the global linked list
2093 */
2094 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2095 if (mptsas_head == mpt) {
2096 m = mptsas_head = mpt->m_next;
2097 } else {
2098 for (m = mptsas_head; m != NULL; m = m->m_next) {
2099 if (m->m_next == mpt) {
2100 m->m_next = mpt->m_next;
2101 break;
2102 }
2103 }
2104 if (m == NULL) {
2105 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
2106 }
2107 }
2108
2109 if (mptsas_tail == mpt) {
2110 mptsas_tail = m;
2111 }
2112 rw_exit(&mptsas_global_rwlock);
2113 }
2114
2115 static int
2116 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2117 {
2118 ddi_dma_attr_t task_dma_attrs;
2119
2120 mpt->m_hshk_dma_size = 0;
2121 task_dma_attrs = mpt->m_msg_dma_attr;
2122 task_dma_attrs.dma_attr_sgllen = 1;
2123 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2124
2125 /* allocate Task Management ddi_dma resources */
2126 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
2127 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
2128 alloc_size, NULL) == FALSE) {
2129 return (DDI_FAILURE);
2130 }
2131 mpt->m_hshk_dma_size = alloc_size;
2132
2133 return (DDI_SUCCESS);
2134 }
2135
2136 static void
2137 mptsas_free_handshake_msg(mptsas_t *mpt)
2138 {
2139 if (mpt->m_hshk_dma_size == 0)
2140 return;
2141 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
2142 mpt->m_hshk_dma_size = 0;
2143 }
2144
/*
 * Allocate the SCSA transport vector for this instance, fill in the
 * tran entry points, and attach to the SCSA framework.  Returns TRUE on
 * success; on failure the tran is freed, m_tran is cleared and FALSE is
 * returned.
 */
static int
mptsas_hba_setup(mptsas_t *mpt)
{
	scsi_hba_tran_t *hba_tran;
	int tran_flags;

	/* Allocate a transport structure */
	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
	    SCSI_HBA_CANSLEEP);
	ASSERT(mpt->m_tran != NULL);

	hba_tran->tran_hba_private = mpt;
	hba_tran->tran_tgt_private = NULL;

	hba_tran->tran_tgt_init = mptsas_scsi_tgt_init;
	hba_tran->tran_tgt_free = mptsas_scsi_tgt_free;

	hba_tran->tran_start = mptsas_scsi_start;
	hba_tran->tran_reset = mptsas_scsi_reset;
	hba_tran->tran_abort = mptsas_scsi_abort;
	hba_tran->tran_getcap = mptsas_scsi_getcap;
	hba_tran->tran_setcap = mptsas_scsi_setcap;
	hba_tran->tran_init_pkt = mptsas_scsi_init_pkt;
	hba_tran->tran_destroy_pkt = mptsas_scsi_destroy_pkt;

	hba_tran->tran_dmafree = mptsas_scsi_dmafree;
	hba_tran->tran_sync_pkt = mptsas_scsi_sync_pkt;
	hba_tran->tran_reset_notify = mptsas_scsi_reset_notify;

	hba_tran->tran_get_bus_addr = mptsas_get_bus_addr;
	hba_tran->tran_get_name = mptsas_get_name;

	hba_tran->tran_quiesce = mptsas_scsi_quiesce;
	hba_tran->tran_unquiesce = mptsas_scsi_unquiesce;
	hba_tran->tran_bus_reset = NULL;

	/* Eventcall interfaces are not provided by this HBA. */
	hba_tran->tran_add_eventcall = NULL;
	hba_tran->tran_get_eventcookie = NULL;
	hba_tran->tran_post_event = NULL;
	hba_tran->tran_remove_eventcall = NULL;

	hba_tran->tran_bus_config = mptsas_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	/*
	 * All children of the HBA are iports.  We need the tran cloned,
	 * so we pass the flags to SCSA.  SCSI_HBA_TRAN_CLONE will be
	 * inherited by each iport's tran vector.
	 */
	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);

	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
	    hba_tran, tran_flags) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
		scsi_hba_tran_free(hba_tran);
		mpt->m_tran = NULL;
		return (FALSE);
	}
	return (TRUE);
}
2206
2207 static void
2208 mptsas_hba_teardown(mptsas_t *mpt)
2209 {
2210 (void) scsi_hba_detach(mpt->m_dip);
2211 if (mpt->m_tran != NULL) {
2212 scsi_hba_tran_free(mpt->m_tran);
2213 mpt->m_tran = NULL;
2214 }
2215 }
2216
2217 static void
2218 mptsas_iport_register(mptsas_t *mpt)
2219 {
2220 int i, j;
2221 mptsas_phymask_t mask = 0x0;
2222 /*
2223 * initial value of mask is 0
2224 */
2225 mutex_enter(&mpt->m_mutex);
2226 for (i = 0; i < mpt->m_num_phys; i++) {
2227 mptsas_phymask_t phy_mask = 0x0;
2228 char phy_mask_name[MPTSAS_MAX_PHYS];
2229 uint8_t current_port;
2230
2231 if (mpt->m_phy_info[i].attached_devhdl == 0)
2232 continue;
2233
2234 bzero(phy_mask_name, sizeof (phy_mask_name));
2235
2236 current_port = mpt->m_phy_info[i].port_num;
2237
2238 if ((mask & (1 << i)) != 0)
2239 continue;
2240
2241 for (j = 0; j < mpt->m_num_phys; j++) {
2242 if (mpt->m_phy_info[j].attached_devhdl &&
2243 (mpt->m_phy_info[j].port_num == current_port)) {
2244 phy_mask |= (1 << j);
2245 }
2246 }
2247 mask = mask | phy_mask;
2248
2249 for (j = 0; j < mpt->m_num_phys; j++) {
2250 if ((phy_mask >> j) & 0x01) {
2251 mpt->m_phy_info[j].phy_mask = phy_mask;
2252 }
2253 }
2254
2255 (void) sprintf(phy_mask_name, "%x", phy_mask);
2256
2257 mutex_exit(&mpt->m_mutex);
2258 /*
2259 * register a iport
2260 */
2261 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
2262 mutex_enter(&mpt->m_mutex);
2263 }
2264 mutex_exit(&mpt->m_mutex);
2265 /*
2266 * register a virtual port for RAID volume always
2267 */
2268 (void) scsi_hba_iport_register(mpt->m_dip, "v0");
2269
2270 }
2271
/*
 * Allocate and attach the SMP (Serial Management Protocol) transport for
 * this instance and create the refhash of known SMP targets.  Returns
 * TRUE on success, FALSE if smp_hba_attach_setup() fails (the tran is
 * freed in that case).
 */
static int
mptsas_smp_setup(mptsas_t *mpt)
{
	mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
	ASSERT(mpt->m_smptran != NULL);
	mpt->m_smptran->smp_tran_hba_private = mpt;
	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
	if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
		smp_hba_tran_free(mpt->m_smptran);
		mpt->m_smptran = NULL;
		return (FALSE);
	}
	/*
	 * Initialize smp hash table
	 */
	mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_smp_free, sizeof (mptsas_smp_t),
	    offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
	    KM_SLEEP);
	/* 0xFFFF: initial SMP device handle sentinel — reset to 0 on teardown */
	mpt->m_smp_devhdl = 0xFFFF;

	return (TRUE);
}
2297
2298 static void
2299 mptsas_smp_teardown(mptsas_t *mpt)
2300 {
2301 (void) smp_hba_detach(mpt->m_dip);
2302 if (mpt->m_smptran != NULL) {
2303 smp_hba_tran_free(mpt->m_smptran);
2304 mpt->m_smptran = NULL;
2305 }
2306 mpt->m_smp_devhdl = 0;
2307 }
2308
/*
 * Create the per-instance list used to track enclosures.  Always
 * succeeds; returns TRUE for symmetry with the other setup routines.
 */
static int
mptsas_enc_setup(mptsas_t *mpt)
{
	list_create(&mpt->m_enclosures, sizeof (mptsas_enclosure_t),
	    offsetof(mptsas_enclosure_t, me_link));
	return (TRUE);
}
2316
2317 static void
2318 mptsas_enc_free(mptsas_enclosure_t *mep)
2319 {
2320 if (mep == NULL)
2321 return;
2322 if (mep->me_slotleds != NULL) {
2323 VERIFY3U(mep->me_nslots, >, 0);
2324 kmem_free(mep->me_slotleds, sizeof (uint8_t) * mep->me_nslots);
2325 }
2326 kmem_free(mep, sizeof (mptsas_enclosure_t));
2327 }
2328
2329 static void
2330 mptsas_enc_teardown(mptsas_t *mpt)
2331 {
2332 mptsas_enclosure_t *mep;
2333
2334 while ((mep = list_remove_head(&mpt->m_enclosures)) != NULL) {
2335 mptsas_enc_free(mep);
2336 }
2337 list_destroy(&mpt->m_enclosures);
2338 }
2339
2340 static mptsas_enclosure_t *
2341 mptsas_enc_lookup(mptsas_t *mpt, uint16_t hdl)
2342 {
2343 mptsas_enclosure_t *mep;
2344
2345 ASSERT(MUTEX_HELD(&mpt->m_mutex));
2346
2347 for (mep = list_head(&mpt->m_enclosures); mep != NULL;
2348 mep = list_next(&mpt->m_enclosures, mep)) {
2349 if (hdl == mep->me_enchdl) {
2350 return (mep);
2351 }
2352 }
2353
2354 return (NULL);
2355 }
2356
2357 static int
2358 mptsas_cache_create(mptsas_t *mpt)
2359 {
2360 int instance = mpt->m_instance;
2361 char buf[64];
2362
2363 /*
2364 * create kmem cache for packets
2365 */
2366 (void) sprintf(buf, "mptsas%d_cache", instance);
2367 mpt->m_kmem_cache = kmem_cache_create(buf,
2368 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2369 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2370 NULL, (void *)mpt, NULL, 0);
2371
2372 if (mpt->m_kmem_cache == NULL) {
2373 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2374 return (FALSE);
2375 }
2376
2377 /*
2378 * create kmem cache for extra SGL frames if SGL cannot
2379 * be accomodated into main request frame.
2380 */
2381 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2382 mpt->m_cache_frames = kmem_cache_create(buf,
2383 sizeof (mptsas_cache_frames_t), 8,
2384 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2385 NULL, (void *)mpt, NULL, 0);
2386
2387 if (mpt->m_cache_frames == NULL) {
2388 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2389 return (FALSE);
2390 }
2391
2392 return (TRUE);
2393 }
2394
2395 static void
2396 mptsas_cache_destroy(mptsas_t *mpt)
2397 {
2398 /* deallocate in reverse order */
2399 if (mpt->m_cache_frames) {
2400 kmem_cache_destroy(mpt->m_cache_frames);
2401 mpt->m_cache_frames = NULL;
2402 }
2403 if (mpt->m_kmem_cache) {
2404 kmem_cache_destroy(mpt->m_kmem_cache);
2405 mpt->m_kmem_cache = NULL;
2406 }
2407 }
2408
/*
 * power(9E) entry point.
 *
 * Raises (PM_LEVEL_D0) or lowers (PM_LEVEL_D3) the controller's power
 * level for the PM framework.  When powering on, polls up to 30 seconds
 * for the IOC to leave the reset state and hard-resets it if it is
 * still not operational.  Refuses to lower power while the device is
 * marked busy.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t *mpt;
	int rval = DDI_SUCCESS;
	int polls = 0;
	uint32_t ioc_status;

	/* Power is managed on the HBA node only, not on iport children. */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset
		 * (3000 polls x 10ms delay per poll).
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2480
2481 /*
2482 * Initialize configuration space and figure out which
2483 * chip and revison of the chip the mpt driver is using.
2484 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	/* Idempotent: if config space is already mapped there is no work. */
	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		/* Status bits are write-one-to-clear: clear latched PERR. */
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team.  This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}
2539
2540 static void
2541 mptsas_config_space_fini(mptsas_t *mpt)
2542 {
2543 if (mpt->m_config_handle != NULL) {
2544 mptsas_disable_bus_master(mpt);
2545 pci_config_teardown(&mpt->m_config_handle);
2546 mpt->m_config_handle = NULL;
2547 }
2548 }
2549
2550 static void
2551 mptsas_setup_cmd_reg(mptsas_t *mpt)
2552 {
2553 ushort_t cmdreg;
2554
2555 /*
2556 * Set the command register to the needed values.
2557 */
2558 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2559 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2560 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2561 cmdreg &= ~PCI_COMM_IO;
2562 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2563 }
2564
2565 static void
2566 mptsas_disable_bus_master(mptsas_t *mpt)
2567 {
2568 ushort_t cmdreg;
2569
2570 /*
2571 * Clear the master enable bit in the PCI command register.
2572 * This prevents any bus mastering activity like DMA.
2573 */
2574 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2575 cmdreg &= ~PCI_COMM_ME;
2576 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2577 }
2578
2579 int
2580 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2581 {
2582 ddi_dma_attr_t attrs;
2583
2584 attrs = mpt->m_io_dma_attr;
2585 attrs.dma_attr_sgllen = 1;
2586
2587 ASSERT(dma_statep != NULL);
2588
2589 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2590 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2591 &dma_statep->cookie) == FALSE) {
2592 return (DDI_FAILURE);
2593 }
2594
2595 return (DDI_SUCCESS);
2596 }
2597
/*
 * Free DMA resources previously set up by mptsas_dma_alloc(); the
 * state's size is reset to 0 to mark it unallocated.
 */
void
mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
{
	ASSERT(dma_statep != NULL);
	mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
	dma_statep->size = 0;
}
2605
/*
 * Allocate a temporary single-cookie DMA buffer of 'size' bytes, invoke
 * 'callback' with the buffer's kernel address, 'var' and the access
 * handle, then verify both handles via the FM framework before freeing.
 * Returns the callback's result, or DDI_FAILURE on allocation or FM
 * errors.  Caller must hold m_mutex.
 */
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t attrs;
	ddi_dma_handle_t dma_handle;
	caddr_t memp;
	ddi_acc_handle_t accessp;
	int rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	attrs.dma_attr_granular = size;

	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	rval = (*callback) (mpt, memp, var, accessp);

	/* Check both handles for faults before tearing them down. */
	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}
2638
2639 static int
2640 mptsas_alloc_request_frames(mptsas_t *mpt)
2641 {
2642 ddi_dma_attr_t frame_dma_attrs;
2643 caddr_t memp;
2644 ddi_dma_cookie_t cookie;
2645 size_t mem_size;
2646
2647 /*
2648 * re-alloc when it has already alloced
2649 */
2650 if (mpt->m_dma_req_frame_hdl)
2651 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2652 &mpt->m_acc_req_frame_hdl);
2653
2654 /*
2655 * The size of the request frame pool is:
2656 * Number of Request Frames * Request Frame Size
2657 */
2658 mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2659
2660 /*
2661 * set the DMA attributes. System Request Message Frames must be
2662 * aligned on a 16-byte boundry.
2663 */
2664 frame_dma_attrs = mpt->m_msg_dma_attr;
2665 frame_dma_attrs.dma_attr_align = 16;
2666 frame_dma_attrs.dma_attr_sgllen = 1;
2667
2668 /*
2669 * allocate the request frame pool.
2670 */
2671 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2672 &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2673 mem_size, &cookie) == FALSE) {
2674 return (DDI_FAILURE);
2675 }
2676
2677 /*
2678 * Store the request frame memory address. This chip uses this
2679 * address to dma to and from the driver's frame. The second
2680 * address is the address mpt uses to fill in the frame.
2681 */
2682 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2683 mpt->m_req_frame = memp;
2684
2685 /*
2686 * Clear the request frame pool.
2687 */
2688 bzero(mpt->m_req_frame, mem_size);
2689
2690 return (DDI_SUCCESS);
2691 }
2692
/*
 * Allocate the request sense buffer pool: one buffer per request frame
 * (minus two) plus a region of extended sense buffers handed out via an
 * rmalloc map.  Any existing pool is destroyed first.
 */
static int
mptsas_alloc_sense_bufs(mptsas_t *mpt)
{
	ddi_dma_attr_t sense_dma_attrs;
	caddr_t memp;
	ddi_dma_cookie_t cookie;
	size_t mem_size;
	int num_extrqsense_bufs;

	/* No command may hold an extended sense buffer while we re-alloc. */
	ASSERT(mpt->m_extreq_sense_refcount == 0);

	/*
	 * re-alloc when it has already alloced
	 */
	if (mpt->m_dma_req_sense_hdl) {
		rmfreemap(mpt->m_erqsense_map);
		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
		    &mpt->m_acc_req_sense_hdl);
	}

	/*
	 * The size of the request sense pool is:
	 *   (Number of Request Frames - 2) * Request Sense Size +
	 *   extra memory for extended sense requests.
	 */
	mem_size = ((mpt->m_max_requests - 2) * mpt->m_req_sense_size) +
	    mptsas_extreq_sense_bufsize;

	/*
	 * set the DMA attributes.  ARQ buffers are
	 * aligned on a 16-byte boundry.
	 */
	sense_dma_attrs = mpt->m_msg_dma_attr;
	sense_dma_attrs.dma_attr_align = 16;
	sense_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request sense buffer pool.
	 */
	if (mptsas_dma_addr_create(mpt, sense_dma_attrs,
	    &mpt->m_dma_req_sense_hdl, &mpt->m_acc_req_sense_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request sense base memory address.  This chip uses this
	 * address to dma the request sense data.  The second
	 * address is the address mpt uses to access the data.
	 * The third is the base for the extended rqsense buffers.
	 */
	mpt->m_req_sense_dma_addr = cookie.dmac_laddress;
	mpt->m_req_sense = memp;
	memp += (mpt->m_max_requests - 2) * mpt->m_req_sense_size;
	mpt->m_extreq_sense = memp;

	/*
	 * The extra memory is divided up into multiples of the base
	 * buffer size in order to allocate via rmalloc().
	 * Note that the rmallocmap cannot start at zero!
	 */
	num_extrqsense_bufs = mptsas_extreq_sense_bufsize /
	    mpt->m_req_sense_size;
	mpt->m_erqsense_map = rmallocmap_wait(num_extrqsense_bufs);
	/* Seed the map with indices 1..num_extrqsense_bufs. */
	rmfree(mpt->m_erqsense_map, num_extrqsense_bufs, 1);

	/*
	 * Clear the pool.
	 */
	bzero(mpt->m_req_sense, mem_size);

	return (DDI_SUCCESS);
}
2766
2767 static int
2768 mptsas_alloc_reply_frames(mptsas_t *mpt)
2769 {
2770 ddi_dma_attr_t frame_dma_attrs;
2771 caddr_t memp;
2772 ddi_dma_cookie_t cookie;
2773 size_t mem_size;
2774
2775 /*
2776 * re-alloc when it has already alloced
2777 */
2778 if (mpt->m_dma_reply_frame_hdl) {
2779 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2780 &mpt->m_acc_reply_frame_hdl);
2781 }
2782
2783 /*
2784 * The size of the reply frame pool is:
2785 * Number of Reply Frames * Reply Frame Size
2786 */
2787 mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2788
2789 /*
2790 * set the DMA attributes. System Reply Message Frames must be
2791 * aligned on a 4-byte boundry. This is the default.
2792 */
2793 frame_dma_attrs = mpt->m_msg_dma_attr;
2794 frame_dma_attrs.dma_attr_sgllen = 1;
2795
2796 /*
2797 * allocate the reply frame pool
2798 */
2799 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2800 &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2801 mem_size, &cookie) == FALSE) {
2802 return (DDI_FAILURE);
2803 }
2804
2805 /*
2806 * Store the reply frame memory address. This chip uses this
2807 * address to dma to and from the driver's frame. The second
2808 * address is the address mpt uses to process the frame.
2809 */
2810 mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2811 mpt->m_reply_frame = memp;
2812
2813 /*
2814 * Clear the reply frame pool.
2815 */
2816 bzero(mpt->m_reply_frame, mem_size);
2817
2818 return (DDI_SUCCESS);
2819 }
2820
2821 static int
2822 mptsas_alloc_free_queue(mptsas_t *mpt)
2823 {
2824 ddi_dma_attr_t frame_dma_attrs;
2825 caddr_t memp;
2826 ddi_dma_cookie_t cookie;
2827 size_t mem_size;
2828
2829 /*
2830 * re-alloc when it has already alloced
2831 */
2832 if (mpt->m_dma_free_queue_hdl) {
2833 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2834 &mpt->m_acc_free_queue_hdl);
2835 }
2836
2837 /*
2838 * The reply free queue size is:
2839 * Reply Free Queue Depth * 4
2840 * The "4" is the size of one 32 bit address (low part of 64-bit
2841 * address)
2842 */
2843 mem_size = mpt->m_free_queue_depth * 4;
2844
2845 /*
2846 * set the DMA attributes The Reply Free Queue must be aligned on a
2847 * 16-byte boundry.
2848 */
2849 frame_dma_attrs = mpt->m_msg_dma_attr;
2850 frame_dma_attrs.dma_attr_align = 16;
2851 frame_dma_attrs.dma_attr_sgllen = 1;
2852
2853 /*
2854 * allocate the reply free queue
2855 */
2856 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2857 &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2858 mem_size, &cookie) == FALSE) {
2859 return (DDI_FAILURE);
2860 }
2861
2862 /*
2863 * Store the reply free queue memory address. This chip uses this
2864 * address to read from the reply free queue. The second address
2865 * is the address mpt uses to manage the queue.
2866 */
2867 mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2868 mpt->m_free_queue = memp;
2869
2870 /*
2871 * Clear the reply free queue memory.
2872 */
2873 bzero(mpt->m_free_queue, mem_size);
2874
2875 return (DDI_SUCCESS);
2876 }
2877
2878 static int
2879 mptsas_alloc_post_queue(mptsas_t *mpt)
2880 {
2881 ddi_dma_attr_t frame_dma_attrs;
2882 caddr_t memp;
2883 ddi_dma_cookie_t cookie;
2884 size_t mem_size;
2885
2886 /*
2887 * re-alloc when it has already alloced
2888 */
2889 if (mpt->m_dma_post_queue_hdl) {
2890 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2891 &mpt->m_acc_post_queue_hdl);
2892 }
2893
2894 /*
2895 * The reply descriptor post queue size is:
2896 * Reply Descriptor Post Queue Depth * 8
2897 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2898 */
2899 mem_size = mpt->m_post_queue_depth * 8;
2900
2901 /*
2902 * set the DMA attributes. The Reply Descriptor Post Queue must be
2903 * aligned on a 16-byte boundry.
2904 */
2905 frame_dma_attrs = mpt->m_msg_dma_attr;
2906 frame_dma_attrs.dma_attr_align = 16;
2907 frame_dma_attrs.dma_attr_sgllen = 1;
2908
2909 /*
2910 * allocate the reply post queue
2911 */
2912 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2913 &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2914 mem_size, &cookie) == FALSE) {
2915 return (DDI_FAILURE);
2916 }
2917
2918 /*
2919 * Store the reply descriptor post queue memory address. This chip
2920 * uses this address to write to the reply descriptor post queue. The
2921 * second address is the address mpt uses to manage the queue.
2922 */
2923 mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2924 mpt->m_post_queue = memp;
2925
2926 /*
2927 * Clear the reply post queue memory.
2928 */
2929 bzero(mpt->m_post_queue, mem_size);
2930
2931 return (DDI_SUCCESS);
2932 }
2933
/*
 * Lazily allocate the array of reply handler argument structures, one
 * per possible reply; freed in mptsas_hba_fini().
 */
static void
mptsas_alloc_reply_args(mptsas_t *mpt)
{
	if (mpt->m_replyh_args == NULL) {
		mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
		    mpt->m_max_replies, KM_SLEEP);
	}
}
2942
2943 static int
2944 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2945 {
2946 mptsas_cache_frames_t *frames = NULL;
2947 if (cmd->cmd_extra_frames == NULL) {
2948 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2949 if (frames == NULL) {
2950 return (DDI_FAILURE);
2951 }
2952 cmd->cmd_extra_frames = frames;
2953 }
2954 return (DDI_SUCCESS);
2955 }
2956
2957 static void
2958 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2959 {
2960 if (cmd->cmd_extra_frames) {
2961 kmem_cache_free(mpt->m_cache_frames,
2962 (void *)cmd->cmd_extra_frames);
2963 cmd->cmd_extra_frames = NULL;
2964 }
2965 }
2966
/*
 * Release the register mapping (m_datap) established at attach time.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2973
/*
 * Free all of the DMA pools and queues allocated for the HBA
 * (request/reply frames, sense buffers, free and post queues) along
 * with the reply handler argument array.  Each teardown is conditional,
 * so partial allocations are handled.
 */
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Free up any allocated memory
	 */
	/* Request frame pool. */
	if (mpt->m_dma_req_frame_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
		    &mpt->m_acc_req_frame_hdl);
	}

	/* Request sense pool and its extended-sense rmalloc map. */
	if (mpt->m_dma_req_sense_hdl) {
		rmfreemap(mpt->m_erqsense_map);
		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
		    &mpt->m_acc_req_sense_hdl);
	}

	/* Reply frame pool. */
	if (mpt->m_dma_reply_frame_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
		    &mpt->m_acc_reply_frame_hdl);
	}

	/* Reply free queue. */
	if (mpt->m_dma_free_queue_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
		    &mpt->m_acc_free_queue_hdl);
	}

	/* Reply descriptor post queue. */
	if (mpt->m_dma_post_queue_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
		    &mpt->m_acc_post_queue_hdl);
	}

	/* Reply handler argument array (see mptsas_alloc_reply_args()). */
	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}
3013
3014 static int
3015 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
3016 {
3017 int lun = 0;
3018 char *sas_wwn = NULL;
3019 int phynum = -1;
3020 int reallen = 0;
3021
3022 /* Get the target num */
3023 lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
3024 LUN_PROP, 0);
3025
3026 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
3027 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
3028 /*
3029 * Stick in the address of form "pPHY,LUN"
3030 */
3031 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
3032 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
3033 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
3034 == DDI_PROP_SUCCESS) {
3035 /*
3036 * Stick in the address of the form "wWWN,LUN"
3037 */
3038 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
3039 ddi_prop_free(sas_wwn);
3040 } else {
3041 return (DDI_FAILURE);
3042 }
3043
3044 ASSERT(reallen < len);
3045 if (reallen >= len) {
3046 mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
3047 "length too small, it needs to be %d bytes", reallen + 1);
3048 }
3049 return (DDI_SUCCESS);
3050 }
3051
3052 /*
3053 * tran_tgt_init(9E) - target device instance initialization
3054 */
3055 static int
3056 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3057 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3058 {
3059 #ifndef __lock_lint
3060 _NOTE(ARGUNUSED(hba_tran))
3061 #endif
3062
3063 /*
3064 * At this point, the scsi_device structure already exists
3065 * and has been initialized.
3066 *
3067 * Use this function to allocate target-private data structures,
3068 * if needed by this HBA. Add revised flow-control and queue
3069 * properties for child here, if desired and if you can tell they
3070 * support tagged queueing by now.
3071 */
3072 mptsas_t *mpt;
3073 int lun = sd->sd_address.a_lun;
3074 mdi_pathinfo_t *pip = NULL;
3075 mptsas_tgt_private_t *tgt_private = NULL;
3076 mptsas_target_t *ptgt = NULL;
3077 char *psas_wwn = NULL;
3078 mptsas_phymask_t phymask = 0;
3079 uint64_t sas_wwn = 0;
3080 mptsas_target_addr_t addr;
3081 mpt = SDEV2MPT(sd);
3082
3083 ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
3084
3085 NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
3086 (void *)hba_dip, (void *)tgt_dip, lun));
3087
3088 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
3089 (void) ndi_merge_node(tgt_dip, mptsas_name_child);
3090 ddi_set_name_addr(tgt_dip, NULL);
3091 return (DDI_FAILURE);
3092 }
3093 /*
3094 * phymask is 0 means the virtual port for RAID
3095 */
3096 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
3097 "phymask", 0);
3098 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3099 if ((pip = (void *)(sd->sd_private)) == NULL) {
3100 /*
3101 * Very bad news if this occurs. Somehow scsi_vhci has
3102 * lost the pathinfo node for this target.
3103 */
3104 return (DDI_NOT_WELL_FORMED);
3105 }
3106
3107 if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
3108 DDI_PROP_SUCCESS) {
3109 mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
3110 return (DDI_FAILURE);
3111 }
3112
3113 if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
3114 &psas_wwn) == MDI_SUCCESS) {
3115 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3116 sas_wwn = 0;
3117 }
3118 (void) mdi_prop_free(psas_wwn);
3119 }
3120 } else {
3121 lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
3122 DDI_PROP_DONTPASS, LUN_PROP, 0);
3123 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
3124 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
3125 DDI_PROP_SUCCESS) {
3126 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3127 sas_wwn = 0;
3128 }
3129 ddi_prop_free(psas_wwn);
3130 } else {
3131 sas_wwn = 0;
3132 }
3133 }
3134
3135 ASSERT((sas_wwn != 0) || (phymask != 0));
3136 addr.mta_wwn = sas_wwn;
3137 addr.mta_phymask = phymask;
3138 mutex_enter(&mpt->m_mutex);
3139 ptgt = refhash_lookup(mpt->m_targets, &addr);
3140 mutex_exit(&mpt->m_mutex);
3141 if (ptgt == NULL) {
3142 mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
3143 "gone already! phymask:%x, saswwn %"PRIx64, phymask,
3144 sas_wwn);
3145 return (DDI_FAILURE);
3146 }
3147 if (hba_tran->tran_tgt_private == NULL) {
3148 tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
3149 KM_SLEEP);
3150 tgt_private->t_lun = lun;
3151 tgt_private->t_private = ptgt;
3152 hba_tran->tran_tgt_private = tgt_private;
3153 }
3154
3155 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3156 return (DDI_SUCCESS);
3157 }
3158 mutex_enter(&mpt->m_mutex);
3159
3160 if (ptgt->m_deviceinfo &
3161 (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
3162 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
3163 uchar_t *inq89 = NULL;
3164 int inq89_len = 0x238;
3165 int reallen = 0;
3166 int rval = 0;
3167 struct sata_id *sid = NULL;
3168 char model[SATA_ID_MODEL_LEN + 1];
3169 char fw[SATA_ID_FW_LEN + 1];
3170 char *vid, *pid;
3171
3172 mutex_exit(&mpt->m_mutex);
3173 /*
3174 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
3175 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
3176 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
3177 */
3178 inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
3179 rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
3180 inq89, inq89_len, &reallen, 1);
3181
3182 if (rval != 0) {
3183 if (inq89 != NULL) {
3184 kmem_free(inq89, inq89_len);
3185 }
3186
3187 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
3188 "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
3189 return (DDI_SUCCESS);
3190 }
3191 sid = (void *)(&inq89[60]);
3192
3193 swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
3194 swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
3195
3196 model[SATA_ID_MODEL_LEN] = 0;
3197 fw[SATA_ID_FW_LEN] = 0;
3198
3199 sata_split_model(model, &vid, &pid);
3200
3201 /*
3202 * override SCSA "inquiry-*" properties
3203 */
3204 if (vid)
3205 (void) scsi_device_prop_update_inqstring(sd,
3206 INQUIRY_VENDOR_ID, vid, strlen(vid));
3207 if (pid)
3208 (void) scsi_device_prop_update_inqstring(sd,
3209 INQUIRY_PRODUCT_ID, pid, strlen(pid));
3210 (void) scsi_device_prop_update_inqstring(sd,
3211 INQUIRY_REVISION_ID, fw, strlen(fw));
3212
3213 if (inq89 != NULL) {
3214 kmem_free(inq89, inq89_len);
3215 }
3216 } else {
3217 mutex_exit(&mpt->m_mutex);
3218 }
3219
3220 return (DDI_SUCCESS);
3221 }
3222 /*
3223 * tran_tgt_free(9E) - target device instance deallocation
3224 */
3225 static void
3226 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3227 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3228 {
3229 #ifndef __lock_lint
3230 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
3231 #endif
3232
3233 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
3234
3235 if (tgt_private != NULL) {
3236 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
3237 hba_tran->tran_tgt_private = NULL;
3238 }
3239 }
3240
3241 /*
3242 * scsi_pkt handling
3243 *
3244 * Visible to the external world via the transport structure.
3245 */
3246
3247 /*
3248 * Notes:
3249 * - transport the command to the addressed SCSI target/lun device
3250 * - normal operation is to schedule the command to be transported,
3251 * and return TRAN_ACCEPT if this is successful.
3252 * - if NO_INTR, tran_start must poll device for command completion
3253 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	/* Defensive for non-DEBUG kernels: a cmd without a target is fatal. */
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by other thread, don't spin to wait
	 * for it. Instead, queue the cmd and next time when the instance lock
	 * is not held, accept all the queued cmd. An extra tx_waitq is
	 * introduced to protect the queue.
	 *
	 * The polled cmd will not be queued and accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq. An IO requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and while holding the
	 * tx_wait mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex. This scheme ensures that
	 * the last cmd in a burst be processed.
	 *
	 * we enable this feature only when the helper threads are enabled,
	 * at which we think the loads are heavy.
	 *
	 * per instance mutex m_tx_waitq_mutex is introduced to protect the
	 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
	 */

	if (mpt->m_doneq_thread_n) {
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			/* Got the instance lock without blocking. */
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/*
			 * Polled commands must not sit on the tx_waitq;
			 * block for the instance lock instead.
			 */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/*
				 * Another thread is already draining the
				 * queue; append and let it pick this cmd up.
				 * rval remains TRAN_ACCEPT from
				 * mptsas_prepare_pkt() above.
				 */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* drain the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3396
3397 /*
3398 * Accept all the queued cmds(if any) before accept the current one.
3399 */
3400 static int
3401 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3402 {
3403 int rval;
3404 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3405
3406 ASSERT(mutex_owned(&mpt->m_mutex));
3407 /*
3408 * The call to mptsas_accept_tx_waitq() must always be performed
3409 * because that is where mpt->m_tx_draining is cleared.
3410 */
3411 mutex_enter(&mpt->m_tx_waitq_mutex);
3412 mptsas_accept_tx_waitq(mpt);
3413 mutex_exit(&mpt->m_tx_waitq_mutex);
3414 /*
3415 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3416 * in this case, m_mutex is acquired.
3417 */
3418 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3419 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3420 /*
3421 * The command should be allowed to retry by returning
3422 * TRAN_BUSY to stall the I/O's which come from
3423 * scsi_vhci since the device/path is in unstable state
3424 * now.
3425 */
3426 return (TRAN_BUSY);
3427 } else {
3428 /*
3429 * The device is offline, just fail the command by
3430 * return TRAN_FATAL_ERROR.
3431 */
3432 return (TRAN_FATAL_ERROR);
3433 }
3434 }
3435 rval = mptsas_accept_pkt(mpt, cmd);
3436
3437 return (rval);
3438 }
3439
/*
 * Accept one command for transport: prepare it if necessary, apply
 * throttle/reset/devhdl gating, and either start it immediately or
 * park it on the wait queue.  Called with m_mutex held.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Commands arriving via mptsas_scsi_start() were already prepared;
	 * anything else (e.g. resubmitted from the tx queue) may not be.
	 */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached. Check if being reset and if DevHandle is being
	 * re-initialized. If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/*
			 * Commands that came off the tx queue have already
			 * been accepted; complete them through the done
			 * queue instead of bouncing them back to the caller.
			 */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If device handle has already been invalidated, just
	 * fail the command. In theory, a command from a scsi_vhci
	 * client cannot be sent down with an invalid devhdl, since
	 * devhdl is invalidated after path offline and the target
	 * driver is not supposed to select an offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG3(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/* Same TXQ completion rule as the reset case above. */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case. mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* No free request slot; wait for one to drain. */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd. For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3543
/*
 * Claim a free request slot (SMID) for the command using a rotor scan.
 * Returns TRUE and records the slot in cmd->cmd_slot on success, FALSE
 * when every normal slot is occupied.  Called with m_mutex held.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t *slots = mpt->m_active;
	uint_t slot, start_rotor;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));

	/*
	 * Find the next available slot, beginning at m_rotor. If no slot is
	 * available, we'll return FALSE to indicate that. This mechanism
	 * considers only the normal slots, not the reserved slot 0 nor the
	 * task management slot m_n_normal + 1. The rotor is left to point to
	 * the normal slot after the one we select, unless we select the last
	 * normal slot in which case it returns to slot 1.
	 */
	start_rotor = slots->m_rotor;
	do {
		slot = slots->m_rotor++;
		if (slots->m_rotor > slots->m_n_normal)
			slots->m_rotor = 1;

		/* Full revolution: every normal slot has been examined. */
		if (slots->m_rotor == start_rotor)
			break;
	} while (slots->m_slot[slot] != NULL);

	/* Loop ended on an occupied slot only if the table is full. */
	if (slots->m_slot[slot] != NULL)
		return (FALSE);

	ASSERT(slot != 0 && slot <= slots->m_n_normal);

	cmd->cmd_slot = slot;
	slots->m_slot[slot] = cmd;
	mpt->m_ncmds++;

	/*
	 * only increment per target ncmds if this is not a
	 * command that has no target associated with it (i.e. a
	 * event acknowledgment)
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		/*
		 * Expiration time is set in mptsas_start_cmd
		 */
		ptgt->m_t_ncmds++;
		cmd->cmd_active_expiration = 0;
	} else {
		/*
		 * Initialize expiration time for passthrough commands,
		 */
		cmd->cmd_active_expiration = gethrtime() +
		    (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
	}
	return (TRUE);
}
3605
3606 /*
3607 * prepare the pkt:
3608 * the pkt may have been resubmitted or just reused so
3609 * initialize some fields and do some checks.
3610 */
3611 static int
3612 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3613 {
3614 struct scsi_pkt *pkt = CMD2PKT(cmd);
3615
3616 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3617
3618 /*
3619 * Reinitialize some fields that need it; the packet may
3620 * have been resubmitted
3621 */
3622 pkt->pkt_reason = CMD_CMPLT;
3623 pkt->pkt_state = 0;
3624 pkt->pkt_statistics = 0;
3625 pkt->pkt_resid = 0;
3626 cmd->cmd_age = 0;
3627 cmd->cmd_pkt_flags = pkt->pkt_flags;
3628
3629 /*
3630 * zero status byte.
3631 */
3632 *(pkt->pkt_scbp) = 0;
3633
3634 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3635 pkt->pkt_resid = cmd->cmd_dmacount;
3636
3637 /*
3638 * consistent packets need to be sync'ed first
3639 * (only for data going out)
3640 */
3641 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3642 (cmd->cmd_flags & CFLAG_DMASEND)) {
3643 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3644 DDI_DMA_SYNC_FORDEV);
3645 }
3646 }
3647
3648 cmd->cmd_flags =
3649 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3650 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3651
3652 return (TRAN_ACCEPT);
3653 }
3654
3655 /*
3656 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3657 *
3658 * One of three possibilities:
3659 * - allocate scsi_pkt
3660 * - allocate scsi_pkt and DMA resources
3661 * - allocate DMA resources to an already-allocated pkt
3662 */
static struct scsi_pkt *
mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	mptsas_cmd_t		*cmd, *new_cmd;
	mptsas_t		*mpt = ADDR2MPT(ap);
	uint_t			oldcookiec;
	mptsas_target_t		*ptgt = NULL;
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			kf;

	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	/* Defensive for non-DEBUG: no private data means no target to run. */
	if (tgt_private == NULL) {
		return (NULL);
	}
	ptgt = tgt_private->t_private;
	ASSERT(ptgt != NULL);
	if (ptgt == NULL)
		return (NULL);
	/* Refresh address fields; m_devhdl can change across reconfig. */
	ap->a_target = ptgt->m_devhdl;
	ap->a_lun = tgt_private->t_lun;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
#ifdef MPTSAS_TEST_EXTRN_ALLOC
	statuslen *= 100; tgtlen *= 4;
#endif
	NDBG3(("mptsas_scsi_init_pkt:\n"
	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
	    ap->a_target, (void *)pkt, (void *)bp,
	    cmdlen, statuslen, tgtlen, flags));

	/*
	 * Allocate the new packet.
	 */
	if (pkt == NULL) {
		ddi_dma_handle_t save_dma_handle;

		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
		if (cmd == NULL)
			return (NULL);

		/*
		 * The cache constructor pre-allocated the DMA handle;
		 * preserve it across the bzero of the cmd + pkt area.
		 */
		save_dma_handle = cmd->cmd_dmahandle;
		bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
		cmd->cmd_dmahandle = save_dma_handle;

		/* The scsi_pkt lives immediately after the cmd struct. */
		pkt = (void *)((uchar_t *)cmd +
		    sizeof (struct mptsas_cmd));
		pkt->pkt_ha_private = (opaque_t)cmd;
		pkt->pkt_address = *ap;
		pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
		pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
		pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
		cmd->cmd_pkt = (struct scsi_pkt *)pkt;
		cmd->cmd_cdblen = (uchar_t)cmdlen;
		cmd->cmd_scblen = statuslen;
		cmd->cmd_rqslen = SENSE_LENGTH;
		cmd->cmd_tgt_addr = ptgt;

		/*
		 * Oversized cdb/private/status areas do not fit in the
		 * embedded cmd fields and must be allocated externally.
		 */
		if ((cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			int failure;

			/*
			 * We are going to allocate external packet space which
			 * might include the sense data buffer for DMA so we
			 * need to increase the reference counter here. In a
			 * case the HBA is in reset we just simply free the
			 * allocated packet and bail out.
			 */
			mutex_enter(&mpt->m_mutex);
			if (mpt->m_in_reset) {
				mutex_exit(&mpt->m_mutex);

				cmd->cmd_flags = CFLAG_FREE;
				kmem_cache_free(mpt->m_kmem_cache, cmd);
				return (NULL);
			}
			mpt->m_extreq_sense_refcount++;
			ASSERT(mpt->m_extreq_sense_refcount > 0);
			mutex_exit(&mpt->m_mutex);

			/*
			 * if extern alloc fails, all will be
			 * deallocated, including cmd
			 */
			failure = mptsas_pkt_alloc_extern(mpt, cmd,
			    cmdlen, tgtlen, statuslen, kf);

			if (failure != 0 || cmd->cmd_extrqslen == 0) {
				/*
				 * If the external packet space allocation
				 * failed, or we didn't allocate the sense
				 * data buffer for DMA we need to decrease the
				 * reference counter.
				 */
				mutex_enter(&mpt->m_mutex);
				ASSERT(mpt->m_extreq_sense_refcount > 0);
				mpt->m_extreq_sense_refcount--;
				if (mpt->m_extreq_sense_refcount == 0)
					cv_broadcast(
					    &mpt->m_extreq_sense_refcount_cv);
				mutex_exit(&mpt->m_mutex);

				if (failure != 0) {
					/*
					 * if extern allocation fails, it will
					 * deallocate the new pkt as well
					 */
					return (NULL);
				}
			}
		}
		new_cmd = cmd;

	} else {
		/* Caller supplied an existing pkt; reuse its command. */
		cmd = PKT2CMD(pkt);
		new_cmd = NULL;
	}


	/* grab cmd->cmd_cookiec here as oldcookiec */

	oldcookiec = cmd->cmd_cookiec;

	/*
	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
	 * greater than 0 and we'll need to grab the next dma window
	 */
	/*
	 * SLM-not doing extra command frame right now; may add later
	 */

	if (cmd->cmd_nwin > 0) {

		/*
		 * Make sure we havn't gone past the the total number
		 * of windows
		 */
		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
			return (NULL);
		}
		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
			return (NULL);
		}
		/* Skip the bind; the handle is already bound to this buf. */
		goto get_dma_cookies;
	}


	if (flags & PKT_XARQ) {
		cmd->cmd_flags |= CFLAG_XARQ;
	}

	/*
	 * DMA resource allocation. This version assumes your
	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
	 */
	if (bp && (bp->b_bcount != 0) &&
	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {

		int	cnt, dma_flags;
		mptti_t	*dmap;		/* ptr to the S/G list */

		/*
		 * Set up DMA memory and position to the next DMA segment.
		 */
		ASSERT(cmd->cmd_dmahandle != NULL);

		if (bp->b_flags & B_READ) {
			dma_flags = DDI_DMA_READ;
			cmd->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			dma_flags = DDI_DMA_WRITE;
			cmd->cmd_flags |= CFLAG_DMASEND;
		}
		if (flags & PKT_CONSISTENT) {
			cmd->cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		if (flags & PKT_DMA_PARTIAL) {
			dma_flags |= DDI_DMA_PARTIAL;
		}

		/*
		 * workaround for byte hole issue on psycho and
		 * schizo pre 2.1
		 */
		if ((bp->b_flags & B_READ) && ((bp->b_flags &
		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
		    dma_flags, callback, arg,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
		if (rval == DDI_DMA_PARTIAL_MAP) {
			/* Partial mapping: set up window bookkeeping. */
			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
			    &cmd->cmd_cookiec);
		} else if (rval && (rval != DDI_DMA_MAPPED)) {
			/* Map the bind failure onto a buf error code. */
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->cmd_flags &= ~CFLAG_DMAVALID;
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

get_dma_cookies:
		cmd->cmd_flags |= CFLAG_DMAVALID;
		ASSERT(cmd->cmd_cookiec > 0);

		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
			    cmd->cmd_cookiec);
			bioerror(bp, EINVAL);
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

		/*
		 * Allocate extra SGL buffer if needed.
		 */
		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
		    (cmd->cmd_extra_frames == NULL)) {
			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
			    DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
				    "failed");
				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		/*
		 * Always use scatter-gather transfer
		 * Use the loop below to store physical addresses of
		 * DMA segments, from the DMA cookies, into your HBA's
		 * scatter-gather list.
		 * We need to ensure we have enough kmem alloc'd
		 * for the sg entries since we are no longer using an
		 * array inside mptsas_cmd_t.
		 *
		 * We check cmd->cmd_cookiec against oldcookiec so
		 * the scatter-gather list is correctly allocated
		 */

		if (oldcookiec != cmd->cmd_cookiec) {
			if (cmd->cmd_sg != (mptti_t *)NULL) {
				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
				    oldcookiec);
				cmd->cmd_sg = NULL;
			}
		}

		if (cmd->cmd_sg == (mptti_t *)NULL) {
			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
			    cmd->cmd_cookiec), kf);

			if (cmd->cmd_sg == (mptti_t *)NULL) {
				mptsas_log(mpt, CE_WARN,
				    "unable to kmem_alloc enough memory "
				    "for scatter/gather list");
		/*
		 * if we have an ENOMEM condition we need to behave
		 * the same way as the rest of this routine
		 */

				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		dmap = cmd->cmd_sg;

		ASSERT(cmd->cmd_cookie.dmac_size != 0);

		/*
		 * store the first segment into the S/G list
		 */
		dmap->count = cmd->cmd_cookie.dmac_size;
		dmap->addr.address64.Low = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
		dmap->addr.address64.High = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress >> 32);

		/*
		 * dmacount counts the size of the dma for this window
		 * (if partial dma is being used). totaldmacount
		 * keeps track of the total amount of dma we have
		 * transferred for all the windows (needed to calculate
		 * the resid value below).
		 */
		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

		/*
		 * We already stored the first DMA scatter gather segment,
		 * start at 1 if we need to store more.
		 */
		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
			/*
			 * Get next DMA cookie
			 */
			ddi_dma_nextcookie(cmd->cmd_dmahandle,
			    &cmd->cmd_cookie);
			dmap++;

			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

			/*
			 * store the segment parms into the S/G list
			 */
			dmap->count = cmd->cmd_cookie.dmac_size;
			dmap->addr.address64.Low = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
			dmap->addr.address64.High = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress >> 32);
		}

		/*
		 * If this was partially allocated we set the resid
		 * the amount of data NOT transferred in this window
		 * If there is only one window, the resid will be 0
		 */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
		NDBG3(("mptsas_scsi_init_pkt: cmd_dmacount=%d.",
		    cmd->cmd_dmacount));
	}
	return (pkt);
}
4031
4032 /*
4033 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
4034 *
4035 * Notes:
4036 * - also frees DMA resources if allocated
4037 * - implicit DMA synchonization
4038 */
4039 static void
4040 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4041 {
4042 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4043 mptsas_t *mpt = ADDR2MPT(ap);
4044
4045 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
4046 ap->a_target, (void *)pkt));
4047
4048 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4049 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4050 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4051 }
4052
4053 if (cmd->cmd_sg) {
4054 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
4055 cmd->cmd_sg = NULL;
4056 }
4057
4058 mptsas_free_extra_sgl_frame(mpt, cmd);
4059
4060 if ((cmd->cmd_flags &
4061 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
4062 CFLAG_SCBEXTERN)) == 0) {
4063 cmd->cmd_flags = CFLAG_FREE;
4064 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4065 } else {
4066 boolean_t extrqslen = cmd->cmd_extrqslen != 0;
4067
4068 mptsas_pkt_destroy_extern(mpt, cmd);
4069
4070 /*
4071 * If the packet had the sense data buffer for DMA allocated we
4072 * need to decrease the reference counter.
4073 */
4074 if (extrqslen) {
4075 mutex_enter(&mpt->m_mutex);
4076 ASSERT(mpt->m_extreq_sense_refcount > 0);
4077 mpt->m_extreq_sense_refcount--;
4078 if (mpt->m_extreq_sense_refcount == 0)
4079 cv_broadcast(&mpt->m_extreq_sense_refcount_cv);
4080 mutex_exit(&mpt->m_mutex);
4081 }
4082 }
4083 }
4084
4085 /*
4086 * kmem cache constructor and destructor:
4087 * When constructing, we bzero the cmd and allocate the dma handle
4088 * When destructing, just free the dma handle
4089 */
4090 static int
4091 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
4092 {
4093 mptsas_cmd_t *cmd = buf;
4094 mptsas_t *mpt = cdrarg;
4095 int (*callback)(caddr_t);
4096
4097 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4098
4099 NDBG4(("mptsas_kmem_cache_constructor"));
4100
4101 /*
4102 * allocate a dma handle
4103 */
4104 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
4105 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
4106 cmd->cmd_dmahandle = NULL;
4107 return (-1);
4108 }
4109 return (0);
4110 }
4111
4112 static void
4113 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
4114 {
4115 #ifndef __lock_lint
4116 _NOTE(ARGUNUSED(cdrarg))
4117 #endif
4118 mptsas_cmd_t *cmd = buf;
4119
4120 NDBG4(("mptsas_kmem_cache_destructor"));
4121
4122 if (cmd->cmd_dmahandle) {
4123 ddi_dma_free_handle(&cmd->cmd_dmahandle);
4124 cmd->cmd_dmahandle = NULL;
4125 }
4126 }
4127
4128 static int
4129 mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
4130 {
4131 mptsas_cache_frames_t *p = buf;
4132 mptsas_t *mpt = cdrarg;
4133 ddi_dma_attr_t frame_dma_attr;
4134 size_t mem_size, alloc_len;
4135 ddi_dma_cookie_t cookie;
4136 uint_t ncookie;
4137 int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
4138 ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4139
4140 frame_dma_attr = mpt->m_msg_dma_attr;
4141 frame_dma_attr.dma_attr_align = 0x10;
4142 frame_dma_attr.dma_attr_sgllen = 1;
4143
4144 if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
4145 &p->m_dma_hdl) != DDI_SUCCESS) {
4146 mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
4147 " extra SGL.");
4148 return (DDI_FAILURE);
4149 }
4150
4151 mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;
4152
4153 if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
4154 DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
4155 &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
4156 ddi_dma_free_handle(&p->m_dma_hdl);
4157 p->m_dma_hdl = NULL;
4158 mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
4159 " extra SGL.");
4160 return (DDI_FAILURE);
4161 }
4162
4163 if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
4164 alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
4165 &cookie, &ncookie) != DDI_DMA_MAPPED) {
4166 (void) ddi_dma_mem_free(&p->m_acc_hdl);
4167 ddi_dma_free_handle(&p->m_dma_hdl);
4168 p->m_dma_hdl = NULL;
4169 mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
4170 " extra SGL");
4171 return (DDI_FAILURE);
4172 }
4173
4174 /*
4175 * Store the SGL memory address. This chip uses this
4176 * address to dma to and from the driver. The second
4177 * address is the address mpt uses to fill in the SGL.
4178 */
4179 p->m_phys_addr = cookie.dmac_laddress;
4180
4181 return (DDI_SUCCESS);
4182 }
4183
4184 static void
4185 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
4186 {
4187 #ifndef __lock_lint
4188 _NOTE(ARGUNUSED(cdrarg))
4189 #endif
4190 mptsas_cache_frames_t *p = buf;
4191 if (p->m_dma_hdl != NULL) {
4192 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
4193 (void) ddi_dma_mem_free(&p->m_acc_hdl);
4194 ddi_dma_free_handle(&p->m_dma_hdl);
4195 p->m_phys_addr = NULL;
4196 p->m_frames_addr = NULL;
4197 p->m_dma_hdl = NULL;
4198 p->m_acc_hdl = NULL;
4199 }
4200
4201 }
4202
4203 /*
4204 * Figure out if we need to use a different method for the request
4205 * sense buffer and allocate from the map if necessary.
4206 */
4207 static boolean_t
4208 mptsas_cmdarqsize(mptsas_t *mpt, mptsas_cmd_t *cmd, size_t senselength, int kf)
4209 {
4210 if (senselength > mpt->m_req_sense_size) {
4211 unsigned long i;
4212
4213 /* Sense length is limited to an 8 bit value in MPI Spec. */
4214 if (senselength > 255)
4215 senselength = 255;
4216 cmd->cmd_extrqschunks = (senselength +
4217 (mpt->m_req_sense_size - 1))/mpt->m_req_sense_size;
4218 i = (kf == KM_SLEEP ? rmalloc_wait : rmalloc)
4219 (mpt->m_erqsense_map, cmd->cmd_extrqschunks);
4220
4221 if (i == 0)
4222 return (B_FALSE);
4223
4224 cmd->cmd_extrqslen = (uint16_t)senselength;
4225 cmd->cmd_extrqsidx = i - 1;
4226 cmd->cmd_arq_buf = mpt->m_extreq_sense +
4227 (cmd->cmd_extrqsidx * mpt->m_req_sense_size);
4228 } else {
4229 cmd->cmd_rqslen = (uchar_t)senselength;
4230 }
4231
4232 return (B_TRUE);
4233 }
4234
4235 /*
4236 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
4237 * for non-standard length cdb, pkt_private, status areas
4238 * if allocation fails, then deallocate all external space and the pkt
4239 */
4240 /* ARGSUSED */
4241 static int
4242 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
4243 int cmdlen, int tgtlen, int statuslen, int kf)
4244 {
4245 caddr_t cdbp, scbp, tgt;
4246
4247 NDBG3(("mptsas_pkt_alloc_extern: "
4248 "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
4249 (void *)cmd, cmdlen, tgtlen, statuslen, kf));
4250
4251 tgt = cdbp = scbp = NULL;
4252 cmd->cmd_scblen = statuslen;
4253 cmd->cmd_privlen = (uchar_t)tgtlen;
4254
4255 if (cmdlen > sizeof (cmd->cmd_cdb)) {
4256 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
4257 goto fail;
4258 }
4259 cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
4260 cmd->cmd_flags |= CFLAG_CDBEXTERN;
4261 }
4262 if (tgtlen > PKT_PRIV_LEN) {
4263 if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
4264 goto fail;
4265 }
4266 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
4267 cmd->cmd_pkt->pkt_private = tgt;
4268 }
4269 if (statuslen > EXTCMDS_STATUS_SIZE) {
4270 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
4271 goto fail;
4272 }
4273 cmd->cmd_flags |= CFLAG_SCBEXTERN;
4274 cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
4275
4276 /* allocate sense data buf for DMA */
4277 if (mptsas_cmdarqsize(mpt, cmd, statuslen -
4278 MPTSAS_GET_ITEM_OFF(struct scsi_arq_status, sts_sensedata),
4279 kf) == B_FALSE)
4280 goto fail;
4281 }
4282 return (0);
4283 fail:
4284 mptsas_pkt_destroy_extern(mpt, cmd);
4285 return (1);
4286 }
4287
4288 /*
4289 * deallocate external pkt space and deallocate the pkt
4290 */
4291 static void
4292 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4293 {
4294 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4295
4296 if (cmd->cmd_flags & CFLAG_FREE) {
4297 mptsas_log(mpt, CE_PANIC,
4298 "mptsas_pkt_destroy_extern: freeing free packet");
4299 _NOTE(NOT_REACHED)
4300 /* NOTREACHED */
4301 }
4302 if (cmd->cmd_extrqslen != 0) {
4303 rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
4304 cmd->cmd_extrqsidx + 1);
4305 }
4306 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4307 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4308 }
4309 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4310 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4311 }
4312 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4313 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4314 }
4315 cmd->cmd_flags = CFLAG_FREE;
4316 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4317 }
4318
4319 /*
4320 * tran_sync_pkt(9E) - explicit DMA synchronization
4321 */
4322 /*ARGSUSED*/
4323 static void
4324 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4325 {
4326 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4327
4328 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4329 ap->a_target, (void *)pkt));
4330
4331 if (cmd->cmd_dmahandle) {
4332 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4333 (cmd->cmd_flags & CFLAG_DMASEND) ?
4334 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4335 }
4336 }
4337
4338 /*
4339 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4340 */
4341 /*ARGSUSED*/
4342 static void
4343 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4344 {
4345 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4346 mptsas_t *mpt = ADDR2MPT(ap);
4347
4348 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4349 ap->a_target, (void *)pkt));
4350
4351 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4352 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4353 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4354 }
4355
4356 mptsas_free_extra_sgl_frame(mpt, cmd);
4357 }
4358
4359 static void
4360 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4361 {
4362 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4363 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4364 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4365 DDI_DMA_SYNC_FORCPU);
4366 }
4367 (*pkt->pkt_comp)(pkt);
4368 }
4369
4370 static void
4371 mptsas_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4372 ddi_acc_handle_t acc_hdl, uint_t cookiec, uint32_t end_flags)
4373 {
4374 pMpi2SGESimple64_t sge;
4375 mptti_t *dmap;
4376 uint32_t flags;
4377
4378 dmap = cmd->cmd_sg;
4379
4380 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4381 while (cookiec--) {
4382 ddi_put32(acc_hdl,
4383 &sge->Address.Low, dmap->addr.address64.Low);
4384 ddi_put32(acc_hdl,
4385 &sge->Address.High, dmap->addr.address64.High);
4386 ddi_put32(acc_hdl, &sge->FlagsLength,
4387 dmap->count);
4388 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4389 flags |= ((uint32_t)
4390 (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4391 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4392 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4393 MPI2_SGE_FLAGS_SHIFT);
4394
4395 /*
4396 * If this is the last cookie, we set the flags
4397 * to indicate so
4398 */
4399 if (cookiec == 0) {
4400 flags |= end_flags;
4401 }
4402 if (cmd->cmd_flags & CFLAG_DMASEND) {
4403 flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4404 MPI2_SGE_FLAGS_SHIFT);
4405 } else {
4406 flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4407 MPI2_SGE_FLAGS_SHIFT);
4408 }
4409 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4410 dmap++;
4411 sge++;
4412 }
4413 }
4414
/*
 * Build a multi-frame MPI2 SGL for a command whose DMA cookie count does
 * not fit in the main request frame.  The main frame is filled first and
 * terminated with a chain element that points into the per-command extra
 * frame buffer (cmd->cmd_extra_frames), where the remaining SGEs and any
 * further chain elements are written.
 */
static void
mptsas_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2SGESimple64_t sge;
	pMpi2SGEChain64_t sgechain;
	uint64_t nframe_phys_addr;
	uint_t cookiec;
	mptti_t *dmap;
	uint32_t flags;

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    3 double-words (12 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Some restrictions:
	 * 1. For 64-bit DMA, the simple element and chain element
	 *    are both of 3 double-words (12 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 * 2. On some controllers (like the 1064/1068), a frame can
	 *    hold SGL elements with the last 1 or 2 double-words
	 *    (4 or 8 bytes) un-used. On these controllers, we should
	 *    recognize that there's not enough room for another SGL
	 *    element and move the sge pointer to the next frame.
	 */
	int i, j, k, l, frames, sgemax;
	int temp;
	uint8_t chainflags;
	uint16_t chainlength;
	mptsas_cache_frames_t *p;

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reserved for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
	    - 1);
	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
	    sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	dmap = cmd->cmd_sg;
	sge = (pMpi2SGESimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
	mptsas_sge_mainframe(cmd, frame, acc_hdl, j,
	    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT));
	dmap += j;
	sge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of double-words (4
	 *    bytes) so the last value should be divided by 4.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
	sgechain = (pMpi2SGEChain64_t)sge;
	chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
	ddi_put8(acc_hdl, &sgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		ASSERT(mpt->m_req_frame_size >= sizeof (MPI2_SGE_SIMPLE64));
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_SGE_SIMPLE64) *
		    sizeof (MPI2_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_SGE_SIMPLE64));
	}

	p = cmd->cmd_extra_frames;

	ddi_put16(acc_hdl, &sgechain->Length, chainlength);
	ddi_put32(acc_hdl, &sgechain->Address.Low, p->m_phys_addr);
	ddi_put32(acc_hdl, &sgechain->Address.High, p->m_phys_addr >> 32);

	/*
	 * If there are more than 2 frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame. Note that the value NextChainOffset should be
	 * in double-words (4 bytes).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &sgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
	} else {
		ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	sge = (pMpi2SGESimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				sgechain = (pMpi2SGEChain64_t)sge;
				/*
				 * The chain element consumes this slot, so
				 * back up the cookie counter: this cookie
				 * goes into the next frame instead.
				 */
				j--;
				chainflags = (
				    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
				ddi_put8(p->m_acc_hdl,
				    &sgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				nframe_phys_addr = p->m_phys_addr +
				    (mpt->m_req_frame_size * k);
				ddi_put32(p->m_acc_hdl,
				    &sgechain->Address.Low,
				    nframe_phys_addr);
				ddi_put32(p->m_acc_hdl,
				    &sgechain->Address.High,
				    nframe_phys_addr >> 32);

				/*
				 * If there are more than 2 frames left
				 * we have to next chain offset to
				 * the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &sgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_SGE_SIMPLE64))
					    >> 2);
					ddi_put16(p->m_acc_hdl,
					    &sgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_SGE_SIMPLE64) *
					    sizeof (MPI2_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &sgechain->NextChainOffset,
					    0);
					ddi_put16(p->m_acc_hdl,
					    &sgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				sge = (pMpi2SGESimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			ddi_put32(p->m_acc_hdl,
			    &sge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &sge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &sge->FlagsLength, dmap->count);
			flags = ddi_get32(p->m_acc_hdl,
			    &sge->FlagsLength);
			flags |= ((uint32_t)(
			    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * we set the last simple element as last
			 * element
			 */
			if ((l == sgemax) && (k != frames)) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}

			/*
			 * If this is the final cookie we
			 * indicate it by setting the flags
			 */
			if (j == i) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT |
				    MPI2_SGE_FLAGS_END_OF_BUFFER |
				    MPI2_SGE_FLAGS_END_OF_LIST) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |=
				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |=
				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(p->m_acc_hdl,
			    &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
4715
4716 static void
4717 mptsas_ieee_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4718 ddi_acc_handle_t acc_hdl, uint_t cookiec, uint8_t end_flag)
4719 {
4720 pMpi2IeeeSgeSimple64_t ieeesge;
4721 mptti_t *dmap;
4722 uint8_t flags;
4723
4724 dmap = cmd->cmd_sg;
4725
4726 NDBG1(("mptsas_ieee_sge_mainframe: cookiec=%d, %s", cookiec,
4727 cmd->cmd_flags & CFLAG_DMASEND?"Out":"In"));
4728
4729 ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
4730 while (cookiec--) {
4731 ddi_put32(acc_hdl,
4732 &ieeesge->Address.Low, dmap->addr.address64.Low);
4733 ddi_put32(acc_hdl,
4734 &ieeesge->Address.High, dmap->addr.address64.High);
4735 ddi_put32(acc_hdl, &ieeesge->Length,
4736 dmap->count);
4737 NDBG1(("mptsas_ieee_sge_mainframe: len=%d", dmap->count));
4738 flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
4739 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4740
4741 /*
4742 * If this is the last cookie, we set the flags
4743 * to indicate so
4744 */
4745 if (cookiec == 0) {
4746 flags |= end_flag;
4747 }
4748
4749 ddi_put8(acc_hdl, &ieeesge->Flags, flags);
4750 dmap++;
4751 ieeesge++;
4752 }
4753 }
4754
/*
 * Build a multi-frame IEEE (MPI2.5) SGL for a command whose DMA cookie
 * count does not fit in the main request frame.  Mirrors mptsas_sge_chain
 * but uses 16-byte IEEE simple/chain elements and quad-word offsets.
 */
static void
mptsas_ieee_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2IeeeSgeSimple64_t ieeesge;
	pMpi25IeeeSgeChain64_t ieeesgechain;
	uint64_t nframe_phys_addr;
	uint_t cookiec;
	mptti_t *dmap;
	uint8_t flags;

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_ieee_sge_chain: cookiec=%d", cookiec));

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    4 double-words (16 bytes) long.
	 * 2. Fill up the main frame.  We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing.  Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames.  Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Restrictions:
	 *    For 64-bit DMA, the simple element and chain element
	 *    are both of 4 double-words (16 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 */
	int i, j, k, l, frames, sgemax;
	int temp;
	uint8_t chainflags;
	uint32_t chainlength;
	mptsas_cache_frames_t *p;

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reserved for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_IEEE_SGE_SIMPLE64))
	    - 1);
	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
	    sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	NDBG1(("mptsas_ieee_sge_chain: temp=%d, frames=%d", temp, frames));
	dmap = cmd->cmd_sg;
	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
	mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl, j, 0);
	dmap += j;
	ieeesge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of quad-words (16
	 *    bytes) so the last value should be divided by 16.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
	chainflags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
	ddi_put8(acc_hdl, &ieeesgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		ASSERT(mpt->m_req_frame_size >=
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
	}

	p = cmd->cmd_extra_frames;

	ddi_put32(acc_hdl, &ieeesgechain->Length, chainlength);
	ddi_put32(acc_hdl, &ieeesgechain->Address.Low, p->m_phys_addr);
	ddi_put32(acc_hdl, &ieeesgechain->Address.High, p->m_phys_addr >> 32);

	/*
	 * If there are more than 2 frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame.  Note that the value NextChainOffset should be
	 * in double-words (4 bytes).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	} else {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	ieeesge = (pMpi2IeeeSgeSimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
				/*
				 * The chain element consumes this slot, so
				 * back up the cookie counter: this cookie
				 * goes into the next frame instead.
				 */
				j--;
				chainflags =
				    MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
				ddi_put8(p->m_acc_hdl,
				    &ieeesgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				nframe_phys_addr = p->m_phys_addr +
				    (mpt->m_req_frame_size * k);
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.Low,
				    nframe_phys_addr);
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.High,
				    nframe_phys_addr >> 32);

				/*
				 * If there are more than 2 frames left
				 * we have to next chain offset to
				 * the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64))
					    >> 4);
					ASSERT(mpt->m_req_frame_size >=
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    0);
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				ieeesge = (pMpi2IeeeSgeSimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Length, dmap->count);
			flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * do we need to do anything?
			 * if ((l == sgemax) && (k != frames)) {
			 * }
			 */

			/*
			 * If this is the final cookie set end of list.
			 */
			if (j == i) {
				flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
			}

			ddi_put8(p->m_acc_hdl, &ieeesge->Flags, flags);
			dmap++;
			ieeesge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
5029
5030 static void
5031 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
5032 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
5033 {
5034 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
5035
5036 NDBG1(("mptsas_sge_setup: cookiec=%d", cmd->cmd_cookiec));
5037
5038 /*
5039 * Set read/write bit in control.
5040 */
5041 if (cmd->cmd_flags & CFLAG_DMASEND) {
5042 *control |= MPI2_SCSIIO_CONTROL_WRITE;
5043 } else {
5044 *control |= MPI2_SCSIIO_CONTROL_READ;
5045 }
5046
5047 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
5048
5049 /*
5050 * We have 4 cases here. First where we can fit all the
5051 * SG elements into the main frame, and the case
5052 * where we can't. The SG element is also different when using
5053 * MPI2.5 interface.
5054 * If we have more cookies than we can attach to a frame
5055 * we will need to use a chain element to point
5056 * a location of memory where the rest of the S/G
5057 * elements reside.
5058 */
5059 if (cmd->cmd_cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
5060 if (mpt->m_MPI25) {
5061 mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl,
5062 cmd->cmd_cookiec,
5063 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
5064 } else {
5065 mptsas_sge_mainframe(cmd, frame, acc_hdl,
5066 cmd->cmd_cookiec,
5067 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
5068 | MPI2_SGE_FLAGS_END_OF_BUFFER
5069 | MPI2_SGE_FLAGS_END_OF_LIST) <<
5070 MPI2_SGE_FLAGS_SHIFT));
5071 }
5072 } else {
5073 if (mpt->m_MPI25) {
5074 mptsas_ieee_sge_chain(mpt, cmd, frame, acc_hdl);
5075 } else {
5076 mptsas_sge_chain(mpt, cmd, frame, acc_hdl);
5077 }
5078 }
5079 }
5080
5081 /*
5082 * Interrupt handling
5083 * Utility routine. Poll for status of a command sent to HBA
5084 * without interrupts (a FLAG_NOINTR command).
5085 */
5086 int
5087 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
5088 {
5089 int rval = TRUE;
5090
5091 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
5092
5093 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
5094 mptsas_restart_hba(mpt);
5095 }
5096
5097 /*
5098 * Wait, using drv_usecwait(), long enough for the command to
5099 * reasonably return from the target if the target isn't
5100 * "dead". A polled command may well be sent from scsi_poll, and
5101 * there are retries built in to scsi_poll if the transport
5102 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
5103 * and retries the transport up to scsi_poll_busycnt times
5104 * (currently 60) if
5105 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
5106 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
5107 *
5108 * limit the waiting to avoid a hang in the event that the
5109 * cmd never gets started but we are still receiving interrupts
5110 */
5111 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
5112 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
5113 NDBG5(("mptsas_poll: command incomplete"));
5114 rval = FALSE;
5115 break;
5116 }
5117 }
5118
5119 if (rval == FALSE) {
5120
5121 /*
5122 * this isn't supposed to happen, the hba must be wedged
5123 * Mark this cmd as a timeout.
5124 */
5125 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
5126 (STAT_TIMEOUT|STAT_ABORTED));
5127
5128 if (poll_cmd->cmd_queued == FALSE) {
5129
5130 NDBG5(("mptsas_poll: not on waitq"));
5131
5132 poll_cmd->cmd_pkt->pkt_state |=
5133 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
5134 } else {
5135
5136 /* find and remove it from the waitq */
5137 NDBG5(("mptsas_poll: delete from waitq"));
5138 mptsas_waitq_delete(mpt, poll_cmd);
5139 }
5140
5141 }
5142 mptsas_fma_check(mpt, poll_cmd);
5143 NDBG5(("mptsas_poll: done"));
5144 return (rval);
5145 }
5146
5147 /*
5148 * Used for polling cmds and TM function
5149 */
5150 static int
5151 mptsas_wait_intr(mptsas_t *mpt, int polltime)
5152 {
5153 int cnt;
5154 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5155 uint32_t int_mask;
5156
5157 NDBG5(("mptsas_wait_intr"));
5158
5159 mpt->m_polled_intr = 1;
5160
5161 /*
5162 * Get the current interrupt mask and disable interrupts. When
5163 * re-enabling ints, set mask to saved value.
5164 */
5165 int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
5166 MPTSAS_DISABLE_INTR(mpt);
5167
5168 /*
5169 * Keep polling for at least (polltime * 1000) seconds
5170 */
5171 for (cnt = 0; cnt < polltime; cnt++) {
5172 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5173 DDI_DMA_SYNC_FORCPU);
5174
5175 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5176 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5177
5178 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5179 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5180 ddi_get32(mpt->m_acc_post_queue_hdl,
5181 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5182 drv_usecwait(1000);
5183 continue;
5184 }
5185
5186 /*
5187 * The reply is valid, process it according to its
5188 * type.
5189 */
5190 mptsas_process_intr(mpt, reply_desc_union);
5191
5192 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5193 mpt->m_post_index = 0;
5194 }
5195
5196 /*
5197 * Update the global reply index
5198 */
5199 ddi_put32(mpt->m_datap,
5200 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5201 mpt->m_polled_intr = 0;
5202
5203 /*
5204 * Re-enable interrupts and quit.
5205 */
5206 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
5207 int_mask);
5208 return (TRUE);
5209
5210 }
5211
5212 /*
5213 * Clear polling flag, re-enable interrupts and quit.
5214 */
5215 mpt->m_polled_intr = 0;
5216 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
5217 return (FALSE);
5218 }
5219
/*
 * Handle a SCSI IO success reply descriptor: validate the SMID, look up
 * the command in the active slots, mark the pkt state bits and either
 * wake a passthrough waiter, drop a retry, or queue the cmd on the doneq.
 * Caller must hold m_mutex.
 */
static void
mptsas_handle_scsi_io_success(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
	uint16_t SMID;
	mptsas_slots_t *slots = mpt->m_active;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);

	/*
	 * This is a success reply so just complete the IO.  First, do a sanity
	 * check on the SMID.  The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_normal)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return;
	}

	/* Success: record transport progress state on the pkt. */
	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/*
	 * Passthrough commands are completed by waking the waiting thread;
	 * they stay in their slot (the waiter cleans up).  Everything else
	 * is removed from the active slots here.
	 */
	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_broadcast(&mpt->m_passthru_cv);
		return;
	} else {
		mptsas_remove_cmd(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5289
/*
 * Handle an ADDRESS_REPLY descriptor: locate the full reply frame within the
 * reply-frame DMA area, dispatch on the frame's Function code, then return
 * the frame to the free queue and complete (or retry) the associated command.
 * Event notifications and diag-buffer replies are special-cased because they
 * carry no normal slot/command association.
 */
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2AddressReplyDescriptor_t	address_reply;
	pMPI2DefaultReply_t		reply;
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint32_t			reply_addr, reply_frame_dma_baseaddr;
	uint16_t			SMID, iocstatus;
	mptsas_slots_t			*slots = mpt->m_active;
	mptsas_cmd_t			*cmd = NULL;
	uint8_t				function, buffer_type;
	m_replyh_arg_t			*args;
	int				reply_frame_no;

	ASSERT(mutex_owned(&mpt->m_mutex));

	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
	    &address_reply->ReplyFrameAddress);
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);

	/*
	 * If reply frame is not in the proper range (within the reply-frame
	 * DMA region and aligned on a frame boundary) we should ignore this
	 * message and exit the interrupt handler.
	 */
	reply_frame_dma_baseaddr = mpt->m_reply_frame_dma_addr & 0xffffffffu;
	if ((reply_addr < reply_frame_dma_baseaddr) ||
	    (reply_addr >= (reply_frame_dma_baseaddr +
	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
	    ((reply_addr - reply_frame_dma_baseaddr) %
	    mpt->m_reply_frame_size != 0)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
		    "address 0x%x\n", reply_addr);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	/* Sync the frame for CPU access before reading any of its fields. */
	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
	    reply_frame_dma_baseaddr));
	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

	NDBG31(("mptsas_handle_address_reply: function 0x%x, reply_addr=0x%x",
	    function, reply_addr));

	/*
	 * don't get slot information and command for events since these values
	 * don't exist
	 */
	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
		/*
		 * This could be a TM reply, which use the last allocated SMID,
		 * so allow for that.
		 */
		if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
			    "%d\n", SMID);
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			return;
		}

		cmd = slots->m_slot[SMID];

		/*
		 * print warning and return if the slot is empty
		 */
		if (cmd == NULL) {
			mptsas_log(mpt, CE_WARN, "?NULL command for address "
			    "reply in slot %d", SMID);
			return;
		}
		/*
		 * Passthrough/config/FW-diag commands are completed by their
		 * waiting thread; hand over the reply frame address and wake
		 * all three cvs (only the matching waiter will see FINISHED).
		 */
		if ((cmd->cmd_flags &
		    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
			return;
		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
			mptsas_remove_cmd(mpt, cmd);
		}
		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
	}
	/*
	 * Depending on the function, we need to handle
	 * the reply frame (and cmd) differently.
	 */
	switch (function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		cmd->cmd_rfm = reply_addr;
		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
		    cmd);
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_signal(&mpt->m_fw_cv);
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		/*
		 * Each reply frame has a dedicated m_replyh_args entry; the
		 * taskq callback uses it to find the frame again.
		 */
		reply_frame_no = (reply_addr - reply_frame_dma_baseaddr) /
		    mpt->m_reply_frame_size;
		args = &mpt->m_replyh_args[reply_frame_no];
		args->mpt = (void *)mpt;
		args->rfm = reply_addr;

		/*
		 * Record the event if its type is enabled in
		 * this mpt instance by ioctl.
		 */
		mptsas_record_event(args);

		/*
		 * Handle time critical events
		 * NOT_RESPONDING/ADDED only now
		 */
		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
			/*
			 * Would not return main process,
			 * just let taskq resolve ack action
			 * and ack would be sent in taskq thread
			 */
			NDBG20(("send mptsas_handle_event_sync success"));
		}

		if (mpt->m_in_reset) {
			NDBG20(("dropping event received during reset"));
			return;
		}

		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "No memory available"
			    "for dispatch taskq");
			/*
			 * Return the reply frame to the free queue.
			 */
			ddi_put32(mpt->m_acc_free_queue_hdl,
			    &((uint32_t *)(void *)
			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
				mpt->m_free_index = 0;
			}

			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
		}
		return;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		/*
		 * If SMID is 0, this implies that the reply is due to a
		 * release function with a status that the buffer has been
		 * released.  Set the buffer flags accordingly.
		 */
		if (SMID == 0) {
			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &reply->IOCStatus);
			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
				pBuffer =
				    &mpt->m_fw_diag_buffer_list[buffer_type];
				pBuffer->valid_data = TRUE;
				pBuffer->owned_by_firmware = FALSE;
				pBuffer->immediate = FALSE;
			}
		} else {
			/*
			 * Normal handling of diag post reply with SMID.
			 */
			cmd = slots->m_slot[SMID];

			/*
			 * print warning and return if the slot is empty
			 */
			if (cmd == NULL) {
				mptsas_log(mpt, CE_WARN, "?NULL command for "
				    "address reply in slot %d", SMID);
				return;
			}
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_fw_diag_cv);
		}
		return;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
		break;
	}

	/*
	 * Return the reply frame to the free queue.
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
	    reply_addr);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);

	/* FW commands (e.g. FW_DOWNLOAD) are completed by their waiters. */
	if (cmd->cmd_flags & CFLAG_FW_CMD)
		return;

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5520
#ifdef MPTSAS_DEBUG
/*
 * Debug-only snapshot of the most recently received sense data, copied in
 * mptsas_check_scsi_io_error() so it can be inspected with a debugger.
 */
static uint8_t mptsas_last_sense[256];
#endif
5524
/*
 * Decode a full SCSI IO reply frame (error or non-trivial status) and
 * translate the firmware's SCSIStatus/IOCStatus/SCSIState into scsi_pkt
 * pkt_reason/pkt_state/pkt_resid and, for CHECK CONDITION, the auto-request
 * sense data.  May also trigger throttling, retries, or a dynamic
 * reconfiguration taskq dispatch depending on the decoded status.
 */
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
	uint8_t			scsi_status, scsi_state;
	uint16_t		ioc_status, cmd_rqs_len;
	uint32_t		xferred, sensecount, responsedata, loginfo = 0;
	struct scsi_pkt		*pkt;
	struct scsi_arq_status	*arqstat;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint8_t			*sensedata = NULL;
	uint64_t		sas_wwn;
	uint8_t			phy;
	char			wwn_str[MPTSAS_WWN_STRLEN];

	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
	    &reply->ResponseInfo);

	/* Report firmware log info when the IOC says it is available. */
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		sas_wwn = ptgt->m_addr.mta_wwn;
		phy = ptgt->m_phynum;
		if (sas_wwn == 0) {
			(void) sprintf(wwn_str, "p%x", phy);
		} else {
			(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
		}
		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		mptsas_log(mpt, CE_NOTE,
		    "?Log info 0x%x received for target %d %s.\n"
		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
		    loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
		    scsi_state);
	}

	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
	    scsi_status, ioc_status, scsi_state));

	pkt = CMD2PKT(cmd);
	*(pkt->pkt_scbp) = scsi_status;

	if (loginfo == 0x31170000) {
		/*
		 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
		 * 0x31170000 comes, that means the device missing delay
		 * is in progress; the command needs to be retried later.
		 */
		*(pkt->pkt_scbp) = STATUS_BUSY;
		return;
	}

	/* Device gone before any SCSI status could be returned. */
	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state |= STATE_GOT_BUS;
		if (ptgt->m_reset_delay == 0) {
			mptsas_set_throttle(mpt, ptgt,
			    DRAIN_THROTTLE);
		}
		return;
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		/* Only the low byte of ResponseInfo carries the code. */
		responsedata &= 0x000000FF;
		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
			mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
			pkt->pkt_reason = CMD_TLR_OFF;
			return;
		}
	}


	switch (scsi_status) {
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		/*
		 * Build the auto-request-sense (ARQ) status in pkt_scbp and
		 * copy the sense bytes that the firmware gathered for us.
		 */
		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
		arqstat = (void*)(pkt->pkt_scbp);
		arqstat->sts_rqpkt_status = *((struct scsi_status *)
		    (pkt->pkt_scbp));
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
		if (cmd->cmd_flags & CFLAG_XARQ) {
			pkt->pkt_state |= STATE_XARQ_DONE;
		}
		if (pkt->pkt_resid != cmd->cmd_dmacount) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
		arqstat->sts_rqpkt_state = pkt->pkt_state;
		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
		sensedata = (uint8_t *)&arqstat->sts_sensedata;
		cmd_rqs_len = cmd->cmd_extrqslen ?
		    cmd->cmd_extrqslen : cmd->cmd_rqslen;
		(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
		    DDI_DMA_SYNC_FORKERNEL);
#ifdef MPTSAS_DEBUG
		/* Keep a debugger-visible copy of the latest sense data. */
		bcopy(cmd->cmd_arq_buf, mptsas_last_sense,
		    ((cmd_rqs_len >= sizeof (mptsas_last_sense)) ?
		    sizeof (mptsas_last_sense):cmd_rqs_len));
#endif
		bcopy((uchar_t *)cmd->cmd_arq_buf, sensedata,
		    ((cmd_rqs_len >= sensecount) ? sensecount :
		    cmd_rqs_len));
		arqstat->sts_rqpkt_resid = (cmd_rqs_len - sensecount);
		cmd->cmd_flags |= CFLAG_CMDARQ;
		/*
		 * Set proper status for pkt if autosense was valid
		 */
		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			struct scsi_status zero_status = { 0 };
			arqstat->sts_rqpkt_status = zero_status;
		}

		/*
		 * ASC=0x47 is parity error
		 * ASC=0x48 is initiator detected error received
		 */
		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
		    ((scsi_sense_asc(sensedata) == 0x47) ||
		    (scsi_sense_asc(sensedata) == 0x48))) {
			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
		}

		/*
		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
		 * ASC/ASCQ=0x25/0x00 means invalid lun
		 */
		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
		    (scsi_sense_asc(sensedata) == 0x3F) &&
		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
		    (scsi_sense_asc(sensedata) == 0x25) &&
		    (scsi_sense_ascq(sensedata) == 0x00))) {
			/*
			 * LUN configuration changed under us; kick off a
			 * target reconfiguration via the DR taskq.
			 */
			mptsas_topo_change_list_t *topo_node = NULL;

			topo_node = kmem_zalloc(
			    sizeof (mptsas_topo_change_list_t),
			    KM_NOSLEEP);
			if (topo_node == NULL) {
				mptsas_log(mpt, CE_NOTE, "No memory"
				    "resource for handle SAS dynamic"
				    "reconfigure.\n");
				break;
			}
			topo_node->mpt = mpt;
			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
			topo_node->un.phymask = ptgt->m_addr.mta_phymask;
			topo_node->devhdl = ptgt->m_devhdl;
			topo_node->object = (void *)ptgt;
			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr,
			    (void *)topo_node,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				kmem_free(topo_node,
				    sizeof (mptsas_topo_change_list_t));
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
				    "for handle SAS dynamic reconfigure"
				    "failed. \n");
			}
		}
		break;
	case MPI2_SCSI_STATUS_GOOD:
		/* Good SCSI status but a non-success IOC status: decode it. */
		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state |= STATE_GOT_BUS;
			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			NDBG31(("lost disk for target%d, command:%x",
			    Tgt(cmd), pkt->pkt_cdbp[0]));
			break;
		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
			NDBG31(("data overrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_reason = CMD_DATA_OVR;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS
			    | STATE_XFERRED_DATA);
			pkt->pkt_resid = 0;
			break;
		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
			NDBG31(("data underrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS);
			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
			if (pkt->pkt_resid != cmd->cmd_dmacount) {
				pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			break;
		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
			if (cmd->cmd_active_expiration <= gethrtime()) {
				/*
				 * When timeout requested, propagate
				 * proper reason and statistics to
				 * target drivers.
				 */
				mptsas_set_pkt_reason(mpt, cmd, CMD_TIMEOUT,
				    STAT_BUS_RESET | STAT_TIMEOUT);
			} else {
				mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
				    STAT_BUS_RESET);
			}
			break;
		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_DEV_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_TERMINATED, STAT_TERMINATED);
			break;
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		case MPI2_IOCSTATUS_BUSY:
			/*
			 * set throttles to drain
			 */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/*
			 * retry command
			 */
			cmd->cmd_flags |= CFLAG_RETRY;
			cmd->cmd_pkt_flags |= FLAG_HEAD;

			(void) mptsas_accept_pkt(mpt, cmd);
			break;
		default:
			mptsas_log(mpt, CE_WARN,
			    "unknown ioc_status = %x\n", ioc_status);
			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
			    "count = %x, scsi_status = %x", scsi_state,
			    xferred, scsi_status);
			break;
		}
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		mptsas_handle_qfull(mpt, cmd);
		break;
	case MPI2_SCSI_STATUS_BUSY:
		NDBG31(("scsi_status busy received"));
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		NDBG31(("scsi_status reservation conflict received"));
		break;
	default:
		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
		    scsi_status, ioc_status);
		mptsas_log(mpt, CE_WARN,
		    "mptsas_process_intr: invalid scsi status\n");
		break;
	}
}
5794
5795 static void
5796 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5797 mptsas_cmd_t *cmd)
5798 {
5799 uint8_t task_type;
5800 uint16_t ioc_status;
5801 uint32_t log_info;
5802 uint16_t dev_handle;
5803 struct scsi_pkt *pkt = CMD2PKT(cmd);
5804
5805 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5806 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5807 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5808 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5809
5810 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5811 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5812 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5813 task_type, ioc_status, log_info, dev_handle);
5814 pkt->pkt_reason = CMD_INCOMPLETE;
5815 return;
5816 }
5817
5818 switch (task_type) {
5819 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5820 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5821 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5822 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5823 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5824 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5825 break;
5826 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5827 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5828 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5829 /*
5830 * Check for invalid DevHandle of 0 in case application
5831 * sends bad command. DevHandle of 0 could cause problems.
5832 */
5833 if (dev_handle == 0) {
5834 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5835 " DevHandle of 0.");
5836 } else {
5837 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5838 task_type);
5839 }
5840 break;
5841 default:
5842 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5843 task_type);
5844 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5845 break;
5846 }
5847 }
5848
/*
 * Helper-thread body: drain this thread's private done queue, completing one
 * command at a time with the item mutex dropped around the target-driver
 * callback.  Exits when the MPTSAS_DONEQ_THREAD_ACTIVE flag is cleared and
 * announces its departure on m_doneq_thread_cv.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t			*mpt = arg->mpt;
	uint64_t			t = arg->t;
	mptsas_cmd_t			*cmd;
	struct scsi_pkt			*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		/* Sleep until work arrives on this thread's queue. */
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/* Drop the item lock across the completion callback. */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Tell the teardown path this helper thread is gone. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5880
5881
5882 /*
5883 * mpt interrupt handler.
5884 */
/*
 * mpt interrupt handler.
 *
 * Drains the reply post queue, processing each valid descriptor, then
 * publishes the new post index to the chip, empties (or hands off) the done
 * queue, and restarts any waiting commands.  Returns DDI_INTR_UNCLAIMED when
 * the interrupt is not ours (chip powered down, polling in progress, or no
 * interrupt pending).
 */
static uint_t
mptsas_intr(caddr_t arg1, caddr_t arg2)
{
	mptsas_t			*mpt = (void *)arg1;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	uchar_t				did_reply = FALSE;

	NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));

	mutex_enter(&mpt->m_mutex);

	/*
	 * If interrupts are shared by two channels then check whether this
	 * interrupt is genuinely for this channel by making sure first the
	 * chip is in high power state.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * If polling, interrupt was triggered by some shared interrupt because
	 * IOC interrupts are disabled during polling, so polling routine will
	 * handle any replies.  Considering this, if polling is happening,
	 * return with interrupt unclaimed.
	 */
	if (mpt->m_polled_intr) {
		mutex_exit(&mpt->m_mutex);
		mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Read the istat register.
	 */
	if ((INTPENDING(mpt)) != 0) {
		/*
		 * read fifo until empty.
		 */
#ifndef __lock_lint
		_NOTE(CONSTCOND)
#endif
		while (TRUE) {
			(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
			    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

			/*
			 * An all-ones descriptor is the unused-slot pattern
			 * written back after processing; the queue is empty.
			 */
			if (ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
			    ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
				break;
			}

			/*
			 * The reply is valid, process it according to its
			 * type.  Also, set a flag for updating the reply index
			 * after they've all been processed.
			 */
			did_reply = TRUE;

			mptsas_process_intr(mpt, reply_desc_union);

			/*
			 * Increment post index and roll over if needed.
			 */
			if (++mpt->m_post_index == mpt->m_post_queue_depth) {
				mpt->m_post_index = 0;
			}
		}

		/*
		 * Update the global reply index if at least one reply was
		 * processed.
		 */
		if (did_reply) {
			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_UNCLAIMED);
	}
	NDBG1(("mptsas_intr complete"));

	/*
	 * If no helper threads are created, process the doneq in ISR.  If
	 * helpers are created, use the doneq length as a metric to measure the
	 * load on the interrupt CPU.  If it is long enough, which indicates the
	 * load is heavy, then we deliver the IO completions to the helpers.
	 * This measurement has some limitations, although it is simple and
	 * straightforward and works well for most of the cases at present.
	 */
	if (!mpt->m_doneq_thread_n ||
	    (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
		mptsas_doneq_empty(mpt);
	} else {
		mptsas_deliver_doneq_thread(mpt);
	}

	/*
	 * If there are queued cmd, start them now.
	 */
	if (mpt->m_waitq != NULL) {
		mptsas_restart_waitq(mpt);
	}

	mutex_exit(&mpt->m_mutex);
	return (DDI_INTR_CLAIMED);
}
5998
/*
 * Dispatch a single reply descriptor by its ReplyFlags type (SCSI IO success,
 * fast-path success, or address reply), then clear the descriptor back to the
 * all-ones unused pattern and sync it for the device.
 */
static void
mptsas_process_intr(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc_union)
{
	uint8_t reply_type;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * The reply is valid, process it according to its
	 * type.  Also, set a flag for updating the reply index
	 * after they've all been processed.
	 */
	reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
	    &reply_desc_union->Default.ReplyFlags);
	reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
	    reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
		mptsas_handle_scsi_io_success(mpt, reply_desc_union);
	} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
		mptsas_handle_address_reply(mpt, reply_desc_union);
	} else {
		mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}

	/*
	 * Clear the reply descriptor for re-use and increment
	 * index.
	 */
	ddi_put64(mpt->m_acc_post_queue_hdl,
	    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
	    0xFFFFFFFFFFFFFFFF);
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
}
6035
6036 /*
6037 * handle qfull condition
6038 */
/*
 * handle qfull condition
 *
 * Either give up (retries exhausted or qfull-retries capability is 0) and
 * let the target driver see STATUS_QFULL, or resubmit the command at the
 * head of the queue with a reduced target throttle.
 */
static void
mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
	    (ptgt->m_qfull_retries == 0)) {
		/*
		 * We have exhausted the retries on QFULL, or,
		 * the target driver has indicated that it
		 * wants to handle QFULL itself by setting
		 * qfull-retries capability to 0.  In either case
		 * we want the target driver's QFULL handling
		 * to kick in.  We do this by having pkt_reason
		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
		 */
		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
	} else {
		/* Lower the throttle below the current outstanding count. */
		if (ptgt->m_reset_delay == 0) {
			ptgt->m_t_throttle =
			    max((ptgt->m_t_ncmds - 2), 0);
		}

		cmd->cmd_pkt_flags |= FLAG_HEAD;
		cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
		cmd->cmd_flags |= CFLAG_RETRY;

		(void) mptsas_accept_pkt(mpt, cmd);

		/*
		 * when target gives queue full status with no commands
		 * outstanding (m_t_ncmds == 0), throttle is set to 0
		 * (HOLD_THROTTLE), and the queue full handling start
		 * (see psarc/1994/313); if there are commands outstanding,
		 * throttle is set to (m_t_ncmds - 2)
		 */
		if (ptgt->m_t_throttle == HOLD_THROTTLE) {
			/*
			 * By setting throttle to QFULL_THROTTLE, we
			 * avoid submitting new commands and in
			 * mptsas_restart_cmd find out slots which need
			 * their throttles to be cleared.
			 */
			mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
			if (mpt->m_restart_cmd_timeid == 0) {
				mpt->m_restart_cmd_timeid =
				    timeout(mptsas_restart_cmd, mpt,
				    ptgt->m_qfull_retry_interval);
			}
		}
	}
}
6091
6092 mptsas_phymask_t
6093 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
6094 {
6095 mptsas_phymask_t phy_mask = 0;
6096 uint8_t i = 0;
6097
6098 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
6099
6100 ASSERT(mutex_owned(&mpt->m_mutex));
6101
6102 /*
6103 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
6104 */
6105 if (physport == 0xFF) {
6106 return (0);
6107 }
6108
6109 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
6110 if (mpt->m_phy_info[i].attached_devhdl &&
6111 (mpt->m_phy_info[i].phy_mask != 0) &&
6112 (mpt->m_phy_info[i].port_num == physport)) {
6113 phy_mask = mpt->m_phy_info[i].phy_mask;
6114 break;
6115 }
6116 }
6117 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
6118 mpt->m_instance, physport, phy_mask));
6119 return (phy_mask);
6120 }
6121
6122 /*
6123 * mpt free device handle after device gone, by use of passthrough
6124 */
6125 static int
6126 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
6127 {
6128 Mpi2SasIoUnitControlRequest_t req;
6129 Mpi2SasIoUnitControlReply_t rep;
6130 int ret;
6131
6132 ASSERT(mutex_owned(&mpt->m_mutex));
6133
6134 /*
6135 * Need to compose a SAS IO Unit Control request message
6136 * and call mptsas_do_passthru() function
6137 */
6138 bzero(&req, sizeof (req));
6139 bzero(&rep, sizeof (rep));
6140
6141 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
6142 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
6143 req.DevHandle = LE_16(devhdl);
6144
6145 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
6146 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
6147 if (ret != 0) {
6148 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6149 "Control error %d", ret);
6150 return (DDI_FAILURE);
6151 }
6152
6153 /* do passthrough success, check the ioc status */
6154 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
6155 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6156 "Control IOCStatus %d", LE_16(rep.IOCStatus));
6157 return (DDI_FAILURE);
6158 }
6159
6160 return (DDI_SUCCESS);
6161 }
6162
6163 /*
6164 * We have a SATA target that has changed, which means the "bridge-port"
6165 * property must be updated to reflect the SAS WWN of the new attachment point.
6166 * This may change if a SATA device changes which bay, and therefore phy, it is
6167 * plugged into. This SATA device may be a multipath virtual device or may be a
6168 * physical device. We have to handle both cases.
6169 */
6170 static boolean_t
6171 mptsas_update_sata_bridge(mptsas_t *mpt, dev_info_t *parent,
6172 mptsas_target_t *ptgt)
6173 {
6174 int rval;
6175 uint16_t dev_hdl;
6176 uint16_t pdev_hdl;
6177 uint64_t dev_sas_wwn;
6178 uint8_t physport;
6179 uint8_t phy_id;
6180 uint32_t page_address;
6181 uint16_t bay_num, enclosure, io_flags;
6182 uint32_t dev_info;
6183 char uabuf[SCSI_WWN_BUFLEN];
6184 dev_info_t *dip;
6185 mdi_pathinfo_t *pip;
6186
6187 mutex_enter(&mpt->m_mutex);
6188 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6189 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)ptgt->m_devhdl;
6190 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
6191 &dev_sas_wwn, &dev_info, &physport, &phy_id, &pdev_hdl, &bay_num,
6192 &enclosure, &io_flags);
6193 mutex_exit(&mpt->m_mutex);
6194 if (rval != DDI_SUCCESS) {
6195 mptsas_log(mpt, CE_WARN, "unable to get SAS page 0 for "
6196 "handle %d", page_address);
6197 return (B_FALSE);
6198 }
6199
6200 if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
6201 mptsas_log(mpt, CE_WARN,
6202 "mptsas unable to format SATA bridge WWN");
6203 return (B_FALSE);
6204 }
6205
6206 if (mpt->m_mpxio_enable == TRUE && (pip = mptsas_find_path_addr(parent,
6207 ptgt->m_addr.mta_wwn, 0)) != NULL) {
6208 if (mdi_prop_update_string(pip, SCSI_ADDR_PROP_BRIDGE_PORT,
6209 uabuf) != DDI_SUCCESS) {
6210 mptsas_log(mpt, CE_WARN,
6211 "mptsas unable to create SCSI bridge port "
6212 "property for SATA device");
6213 return (B_FALSE);
6214 }
6215 return (B_TRUE);
6216 }
6217
6218 if ((dip = mptsas_find_child_addr(parent, ptgt->m_addr.mta_wwn,
6219 0)) != NULL) {
6220 if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
6221 SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) != DDI_PROP_SUCCESS) {
6222 mptsas_log(mpt, CE_WARN,
6223 "mptsas unable to create SCSI bridge port "
6224 "property for SATA device");
6225 return (B_FALSE);
6226 }
6227 return (B_TRUE);
6228 }
6229
6230 mptsas_log(mpt, CE_WARN, "mptsas failed to find dev_info_t or "
6231 "mdi_pathinfo_t for target with WWN %016" PRIx64,
6232 ptgt->m_addr.mta_wwn);
6233
6234 return (B_FALSE);
6235 }
6236
6237 static void
6238 mptsas_update_phymask(mptsas_t *mpt)
6239 {
6240 mptsas_phymask_t mask = 0, phy_mask;
6241 char *phy_mask_name;
6242 uint8_t current_port;
6243 int i, j;
6244
6245 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
6246
6247 ASSERT(mutex_owned(&mpt->m_mutex));
6248
6249 (void) mptsas_get_sas_io_unit_page(mpt);
6250
6251 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6252
6253 for (i = 0; i < mpt->m_num_phys; i++) {
6254 phy_mask = 0x00;
6255
6256 if (mpt->m_phy_info[i].attached_devhdl == 0)
6257 continue;
6258
6259 bzero(phy_mask_name, sizeof (phy_mask_name));
6260
6261 current_port = mpt->m_phy_info[i].port_num;
6262
6263 if ((mask & (1 << i)) != 0)
6264 continue;
6265
6266 for (j = 0; j < mpt->m_num_phys; j++) {
6267 if (mpt->m_phy_info[j].attached_devhdl &&
6268 (mpt->m_phy_info[j].port_num == current_port)) {
6269 phy_mask |= (1 << j);
6270 }
6271 }
6272 mask = mask | phy_mask;
6273
6274 for (j = 0; j < mpt->m_num_phys; j++) {
6275 if ((phy_mask >> j) & 0x01) {
6276 mpt->m_phy_info[j].phy_mask = phy_mask;
6277 }
6278 }
6279
6280 (void) sprintf(phy_mask_name, "%x", phy_mask);
6281
6282 mutex_exit(&mpt->m_mutex);
6283 /*
6284 * register a iport, if the port has already been existed
6285 * SCSA will do nothing and just return.
6286 */
6287 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
6288 mutex_enter(&mpt->m_mutex);
6289 }
6290 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6291 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
6292 }
6293
6294 /*
6295 * mptsas_handle_dr is a task handler for DR, the DR action includes:
6296 * 1. Directly attched Device Added/Removed.
6297 * 2. Expander Device Added/Removed.
6298 * 3. Indirectly Attached Device Added/Expander.
6299 * 4. LUNs of a existing device status change.
6300 * 5. RAID volume created/deleted.
6301 * 6. Member of RAID volume is released because of RAID deletion.
6302 * 7. Physical disks are removed because of RAID creation.
6303 */
6304 static void
6305 mptsas_handle_dr(void *args)
6306 {
6307 mptsas_topo_change_list_t *topo_node = NULL;
6308 mptsas_topo_change_list_t *save_node = NULL;
6309 mptsas_t *mpt;
6310 dev_info_t *parent = NULL;
6311 mptsas_phymask_t phymask = 0;
6312 char *phy_mask_name;
6313 uint8_t flags = 0, physport = 0xff;
6314 uint8_t port_update = 0;
6315 uint_t event;
6316
6317 topo_node = (mptsas_topo_change_list_t *)args;
6318
6319 mpt = topo_node->mpt;
6320 event = topo_node->event;
6321 flags = topo_node->flags;
6322
6323 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6324
6325 NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));
6326
6327 switch (event) {
6328 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6329 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6330 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
6331 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6332 /*
6333 * Direct attached or expander attached device added
6334 * into system or a Phys Disk that is being unhidden.
6335 */
6336 port_update = 1;
6337 }
6338 break;
6339 case MPTSAS_DR_EVENT_RECONFIG_SMP:
6340 /*
6341 * New expander added into system, it must be the head
6342 * of topo_change_list_t
6343 */
6344 port_update = 1;
6345 break;
6346 default:
6347 port_update = 0;
6348 break;
6349 }
6350 /*
6351 * All cases port_update == 1 may cause initiator port form change
6352 */
6353 mutex_enter(&mpt->m_mutex);
6354 if (mpt->m_port_chng && port_update) {
6355 /*
6356 * mpt->m_port_chng flag indicates some PHYs of initiator
6357 * port have changed to online. So when expander added or
6358 * directly attached device online event come, we force to
6359 * update port information by issueing SAS IO Unit Page and
6360 * update PHYMASKs.
6361 */
6362 (void) mptsas_update_phymask(mpt);
6363 mpt->m_port_chng = 0;
6364
6365 }
6366 mutex_exit(&mpt->m_mutex);
6367 while (topo_node) {
6368 phymask = 0;
6369 if (parent == NULL) {
6370 physport = topo_node->un.physport;
6371 event = topo_node->event;
6372 flags = topo_node->flags;
6373 if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
6374 MPTSAS_DR_EVENT_OFFLINE_SMP)) {
6375 /*
6376 * For all offline events, phymask is known
6377 */
6378 phymask = topo_node->un.phymask;
6379 goto find_parent;
6380 }
6381 if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6382 goto handle_topo_change;
6383 }
6384 if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
6385 phymask = topo_node->un.phymask;
6386 goto find_parent;
6387 }
6388
6389 if ((flags ==
6390 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
6391 (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
6392 /*
6393 * There is no any field in IR_CONFIG_CHANGE
6394 * event indicate physport/phynum, let's get
6395 * parent after SAS Device Page0 request.
6396 */
6397 goto handle_topo_change;
6398 }
6399
6400 mutex_enter(&mpt->m_mutex);
6401 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6402 /*
6403 * If the direct attached device added or a
6404 * phys disk is being unhidden, argument
6405 * physport actually is PHY#, so we have to get
6406 * phymask according PHY#.
6407 */
6408 physport = mpt->m_phy_info[physport].port_num;
6409 }
6410
6411 /*
6412 * Translate physport to phymask so that we can search
6413 * parent dip.
6414 */
6415 phymask = mptsas_physport_to_phymask(mpt,
6416 physport);
6417 mutex_exit(&mpt->m_mutex);
6418
6419 find_parent:
6420 bzero(phy_mask_name, MPTSAS_MAX_PHYS);
6421 /*
6422 * For RAID topology change node, write the iport name
6423 * as v0.
6424 */
6425 if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6426 (void) sprintf(phy_mask_name, "v0");
6427 } else {
6428 /*
6429 * phymask can bo 0 if the drive has been
6430 * pulled by the time an add event is
6431 * processed. If phymask is 0, just skip this
6432 * event and continue.
6433 */
6434 if (phymask == 0) {
6435 mutex_enter(&mpt->m_mutex);
6436 save_node = topo_node;
6437 topo_node = topo_node->next;
6438 ASSERT(save_node);
6439 kmem_free(save_node,
6440 sizeof (mptsas_topo_change_list_t));
6441 mutex_exit(&mpt->m_mutex);
6442
6443 parent = NULL;
6444 continue;
6445 }
6446 (void) sprintf(phy_mask_name, "%x", phymask);
6447 }
6448 parent = scsi_hba_iport_find(mpt->m_dip,
6449 phy_mask_name);
6450 if (parent == NULL) {
6451 mptsas_log(mpt, CE_WARN, "Failed to find an "
6452 "iport, should not happen!");
6453 goto out;
6454 }
6455
6456 }
6457 ASSERT(parent);
6458 handle_topo_change:
6459
6460 mutex_enter(&mpt->m_mutex);
6461 /*
6462 * If HBA is being reset, don't perform operations depending
6463 * on the IOC. We must free the topo list, however.
6464 */
6465 if (!mpt->m_in_reset) {
6466 mptsas_handle_topo_change(topo_node, parent);
6467 } else {
6468 NDBG20(("skipping topo change received during reset"));
6469 }
6470 save_node = topo_node;
6471 topo_node = topo_node->next;
6472 ASSERT(save_node);
6473 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
6474 mutex_exit(&mpt->m_mutex);
6475
6476 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6477 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
6478 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
6479 /*
6480 * If direct attached device associated, make sure
6481 * reset the parent before start the next one. But
6482 * all devices associated with expander shares the
6483 * parent. Also, reset parent if this is for RAID.
6484 */
6485 parent = NULL;
6486 }
6487 }
6488 out:
6489 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6490 }
6491
/*
 * Process a single topology-change node: online/offline a target or SMP
 * (expander), free a stale dev handle, and keep the parent iport's SMHBA
 * properties (attached-port, num-phys, virtual-port) in sync.
 *
 * Called with mpt->m_mutex held (asserted below).  The mutex is dropped
 * around NDI/MDI configuration calls and reacquired before returning on
 * most paths; each early "return"/"break" below is written to leave the
 * mutex in the held state the caller expects.
 */
static void
mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
    dev_info_t *parent)
{
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp = NULL;
	mptsas_t	*mpt = (void *)topo_node->mpt;
	uint16_t	devhdl;
	uint16_t	attached_devhdl;
	uint64_t	sas_wwn = 0;
	int		rval = 0;
	uint32_t	page_address;
	uint8_t		phy, flags;
	char		*addr = NULL;
	dev_info_t	*lundip;
	int		circ = 0, circ1 = 0;
	char		attached_wwnstr[MPTSAS_WWN_STRLEN];

	NDBG20(("mptsas%d handle_topo_change enter, devhdl 0x%x,"
	    "event 0x%x, flags 0x%x", mpt->m_instance, topo_node->devhdl,
	    topo_node->event, topo_node->flags));

	ASSERT(mutex_owned(&mpt->m_mutex));

	switch (topo_node->event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
	{
		char *phy_mask_name;
		mptsas_phymask_t phymask = 0;

		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/*
			 * Get latest RAID info.
			 */
			(void) mptsas_get_raid_info(mpt);
			ptgt = refhash_linear_search(mpt->m_targets,
			    mptsas_target_eval_devhdl, &topo_node->devhdl);
			if (ptgt == NULL)
				break;
		} else {
			ptgt = (void *)topo_node->object;
		}

		if (ptgt == NULL) {
			/*
			 * If a Phys Disk was deleted, RAID info needs to be
			 * updated to reflect the new topology.
			 */
			(void) mptsas_get_raid_info(mpt);

			/*
			 * Get sas device page 0 by DevHandle to make sure if
			 * SSP/SATA end device exist.
			 */
			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
			    topo_node->devhdl;

			rval = mptsas_get_target_device_info(mpt, page_address,
			    &devhdl, &ptgt);
			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: target %d is "
				    "not a SAS/SATA device. \n",
				    topo_node->devhdl);
			} else if (rval == DEV_INFO_FAIL_ALLOC) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "allocate memory. \n");
			} else if (rval == DEV_INFO_FAIL_GUID) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "get SATA GUID for target %d. \n",
				    topo_node->devhdl);
			}
			/*
			 * If rval is DEV_INFO_PHYS_DISK or indicates failure
			 * then there is nothing else to do, just leave.
			 * (m_mutex is still held on this return.)
			 */
			if (rval != DEV_INFO_SUCCESS) {
				return;
			}
		}

		ASSERT(ptgt->m_devhdl == topo_node->devhdl);

		/* Drop m_mutex for the NDI/MDI configuration work below. */
		mutex_exit(&mpt->m_mutex);
		flags = topo_node->flags;

		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
			/*
			 * Unhidden phys disk: locate the parent iport by
			 * the target's phymask (caller could not do it,
			 * see mptsas_handle_dr).
			 */
			phymask = ptgt->m_addr.mta_phymask;
			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
			(void) sprintf(phy_mask_name, "%x", phymask);
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find a "
				    "iport for PD, should not happen!");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			ndi_devi_enter(parent, &circ1);
			(void) mptsas_config_raid(parent, topo_node->devhdl,
			    &lundip);
			ndi_devi_exit(parent, circ1);
		} else {
			/*
			 * hold nexus for bus configure
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ);
			ndi_devi_enter(parent, &circ1);
			rval = mptsas_config_target(parent, ptgt);
			/*
			 * release nexus for bus configure
			 */
			ndi_devi_exit(parent, circ1);
			ndi_devi_exit(scsi_vhci_dip, circ);

			/*
			 * If this is a SATA device, make sure that the
			 * bridge-port (the SAS WWN that the SATA device is
			 * plugged into) is updated. This may change if a SATA
			 * device changes which bay, and therefore phy, it is
			 * plugged into.
			 */
			if (IS_SATA_DEVICE(ptgt->m_deviceinfo)) {
				if (!mptsas_update_sata_bridge(mpt, parent,
				    ptgt)) {
					mutex_enter(&mpt->m_mutex);
					return;
				}
			}

			/*
			 * Add parent's props for SMHBA support
			 */
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				bzero(attached_wwnstr,
				    sizeof (attached_wwnstr));
				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
				    ptgt->m_addr.mta_wwn);
				if (ddi_prop_update_string(DDI_DEV_T_NONE,
				    parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT,
				    attached_wwnstr)
				    != DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent,
					    SCSI_ADDR_PROP_ATTACHED_PORT);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    "attached-port props");
					mutex_enter(&mpt->m_mutex);
					return;
				}
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS, 1) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_NUM_PHYS);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    " create num-phys props");
					mutex_enter(&mpt->m_mutex);
					return;
				}

				/*
				 * Update PHY info for smhba
				 */
				mutex_enter(&mpt->m_mutex);
				if (mptsas_smhba_phy_init(mpt)) {
					mptsas_log(mpt, CE_WARN, "mptsas phy"
					    " update failed");
					/* m_mutex held here, as expected. */
					return;
				}
				mutex_exit(&mpt->m_mutex);

				/*
				 * topo_node->un.physport is really the PHY#
				 * for direct attached devices
				 */
				mptsas_smhba_set_one_phy_props(mpt, parent,
				    topo_node->un.physport, &attached_devhdl);

				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT, 0) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_VIRTUAL_PORT);
					mptsas_log(mpt, CE_WARN,
					    "mptsas virtual-port"
					    "port prop update failed");
					mutex_enter(&mpt->m_mutex);
					return;
				}
			}
		}
		mutex_enter(&mpt->m_mutex);

		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
		    ptgt->m_addr.mta_phymask));
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
	{
		devhdl = topo_node->devhdl;
		ptgt = refhash_linear_search(mpt->m_targets,
		    mptsas_target_eval_devhdl, &devhdl);
		if (ptgt == NULL)
			break;

		sas_wwn = ptgt->m_addr.mta_wwn;
		phy = ptgt->m_phynum;

		/* Unit-address string: "w<wwn>" or "p<phy>" for WWN-less. */
		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

		if (sas_wwn) {
			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
		} else {
			(void) sprintf(addr, "p%x", phy);
		}
		ASSERT(ptgt->m_devhdl == devhdl);

		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
		    (topo_node->flags ==
		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Get latest RAID info if RAID volume status changes
			 * or Phys Disk status changes
			 */
			(void) mptsas_get_raid_info(mpt);
		}
		/*
		 * Abort all outstanding command on the device
		 */
		rval = mptsas_do_scsi_reset(mpt, devhdl);
		if (rval) {
			NDBG20(("mptsas%d handle_topo_change to reset target "
			    "before offline devhdl:%x, phymask:%x, rval:%x",
			    mpt->m_instance, ptgt->m_devhdl,
			    ptgt->m_addr.mta_phymask, rval));
		}

		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(scsi_vhci_dip, &circ);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_target(parent, addr);
		ndi_devi_exit(parent, circ1);
		ndi_devi_exit(scsi_vhci_dip, circ);
		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
		    "phymask:%x, rval:%x", mpt->m_instance,
		    ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));

		kmem_free(addr, SCSI_MAXNAMELEN);

		/*
		 * Clear parent's props for SMHBA support
		 */
		flags = topo_node->flags;
		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		mutex_enter(&mpt->m_mutex);
		if (rval == DDI_SUCCESS) {
			/* Offline succeeded: drop the target for good. */
			refhash_remove(mpt->m_targets, ptgt);
			ptgt = NULL;
		} else {
			/*
			 * clean DR_INTRANSITION flag to allow I/O down to
			 * PHCI driver since failover finished.
			 * Invalidate the devhdl
			 */
			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
			ptgt->m_tgt_unconfigured = 0;
			mutex_enter(&mpt->m_tx_waitq_mutex);
			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
			mutex_exit(&mpt->m_tx_waitq_mutex);
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
			rval = mptsas_free_devhdl(mpt, devhdl);

			NDBG20(("mptsas%d handle_topo_change to remove "
			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
			    rval));
		}

		break;
	}
	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
	{
		/*
		 * Controller still holds a dev handle for a device we never
		 * had in the hash (e.g. pulled/re-pulled disk); just reset
		 * and release the handle.
		 */
		devhdl = topo_node->devhdl;
		/*
		 * If this is the remove handle event, do a reset first.
		 */
		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
			rval = mptsas_do_scsi_reset(mpt, devhdl);
			if (rval) {
				NDBG20(("mpt%d reset target before remove "
				    "devhdl:%x, rval:%x", mpt->m_instance,
				    devhdl, rval));
			}
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		rval = mptsas_free_devhdl(mpt, devhdl);
		NDBG20(("mptsas%d handle_topo_change to remove "
		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
		    rval));
		break;
	}
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
	{
		/* New expander: fetch its page 0 and online it. */
		mptsas_smp_t smp;
		dev_info_t *smpdip;

		devhdl = topo_node->devhdl;

		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
		if (rval != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "failed to online smp, "
			    "handle %x", devhdl);
			return;
		}

		psmp = mptsas_smp_alloc(mpt, &smp);
		if (psmp == NULL) {
			return;
		}

		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		(void) mptsas_online_smp(parent, psmp, &smpdip);
		ndi_devi_exit(parent, circ1);

		mutex_enter(&mpt->m_mutex);
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_SMP:
	{
		devhdl = topo_node->devhdl;
		uint32_t dev_info;

		psmp = refhash_linear_search(mpt->m_smp_targets,
		    mptsas_smp_eval_devhdl, &devhdl);
		if (psmp == NULL)
			break;
		/*
		 * The mptsas_smp_t data is released only if the dip is offlined
		 * successfully.
		 */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_smp(parent, psmp);
		ndi_devi_exit(parent, circ1);

		dev_info = psmp->m_deviceinfo;
		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
		    DEVINFO_DIRECT_ATTACHED) {
			/* Direct-attached expander: scrub iport SMHBA props. */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				return;
			}
			/*
			 * Check whether the smp connected to the iport,
			 */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys"
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				return;
			}
			/*
			 * Clear parent's attached-port props
			 */
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				return;
			}
		}

		mutex_enter(&mpt->m_mutex);
		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
		if (rval == DDI_SUCCESS) {
			refhash_remove(mpt->m_smp_targets, psmp);
		} else {
			/* Offline failed: keep the entry but mark it stale. */
			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		}

		bzero(attached_wwnstr, sizeof (attached_wwnstr));

		break;
	}
	default:
		return;
	}
}
6952
6953 /*
6954 * Record the event if its type is enabled in mpt instance by ioctl.
6955 */
6956 static void
6957 mptsas_record_event(void *args)
6958 {
6959 m_replyh_arg_t *replyh_arg;
6960 pMpi2EventNotificationReply_t eventreply;
6961 uint32_t event, rfm;
6962 mptsas_t *mpt;
6963 int i, j;
6964 uint16_t event_data_len;
6965 boolean_t sendAEN = FALSE;
6966
6967 replyh_arg = (m_replyh_arg_t *)args;
6968 rfm = replyh_arg->rfm;
6969 mpt = replyh_arg->mpt;
6970
6971 eventreply = (pMpi2EventNotificationReply_t)
6972 (mpt->m_reply_frame + (rfm -
6973 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
6974 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6975
6976
6977 /*
6978 * Generate a system event to let anyone who cares know that a
6979 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6980 * event mask is set to.
6981 */
6982 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6983 sendAEN = TRUE;
6984 }
6985
6986 /*
6987 * Record the event only if it is not masked. Determine which dword
6988 * and bit of event mask to test.
6989 */
6990 i = (uint8_t)(event / 32);
6991 j = (uint8_t)(event % 32);
6992 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6993 i = mpt->m_event_index;
6994 mpt->m_events[i].Type = event;
6995 mpt->m_events[i].Number = ++mpt->m_event_number;
6996 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6997 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6998 &eventreply->EventDataLength);
6999
7000 if (event_data_len > 0) {
7001 /*
7002 * Limit data to size in m_event entry
7003 */
7004 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
7005 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
7006 }
7007 for (j = 0; j < event_data_len; j++) {
7008 mpt->m_events[i].Data[j] =
7009 ddi_get32(mpt->m_acc_reply_frame_hdl,
7010 &(eventreply->EventData[j]));
7011 }
7012
7013 /*
7014 * check for index wrap-around
7015 */
7016 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
7017 i = 0;
7018 }
7019 mpt->m_event_index = (uint8_t)i;
7020
7021 /*
7022 * Set flag to send the event.
7023 */
7024 sendAEN = TRUE;
7025 }
7026 }
7027
7028 /*
7029 * Generate a system event if flag is set to let anyone who cares know
7030 * that an event has occurred.
7031 */
7032 if (sendAEN) {
7033 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
7034 "SAS", NULL, NULL, DDI_NOSLEEP);
7035 }
7036 }
7037
7038 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
7039 /*
7040 * handle sync events from ioc in interrupt
7041 * return value:
7042 * DDI_SUCCESS: The event is handled by this func
7043 * DDI_FAILURE: Event is not handled
7044 */
7045 static int
7046 mptsas_handle_event_sync(void *args)
7047 {
7048 m_replyh_arg_t *replyh_arg;
7049 pMpi2EventNotificationReply_t eventreply;
7050 uint32_t event, rfm;
7051 mptsas_t *mpt;
7052 uint_t iocstatus;
7053
7054 replyh_arg = (m_replyh_arg_t *)args;
7055 rfm = replyh_arg->rfm;
7056 mpt = replyh_arg->mpt;
7057
7058 ASSERT(mutex_owned(&mpt->m_mutex));
7059
7060 eventreply = (pMpi2EventNotificationReply_t)
7061 (mpt->m_reply_frame + (rfm -
7062 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7063 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7064
7065 if ((iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7066 &eventreply->IOCStatus)) != 0) {
7067 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7068 mptsas_log(mpt, CE_WARN,
7069 "!mptsas_handle_event_sync: event 0x%x, "
7070 "IOCStatus=0x%x, "
7071 "IOCLogInfo=0x%x", event, iocstatus,
7072 ddi_get32(mpt->m_acc_reply_frame_hdl,
7073 &eventreply->IOCLogInfo));
7074 } else {
7075 mptsas_log(mpt, CE_WARN,
7076 "mptsas_handle_event_sync: event 0x%x, "
7077 "IOCStatus=0x%x, "
7078 "(IOCLogInfo=0x%x)", event, iocstatus,
7079 ddi_get32(mpt->m_acc_reply_frame_hdl,
7080 &eventreply->IOCLogInfo));
7081 }
7082 }
7083
7084 /*
7085 * figure out what kind of event we got and handle accordingly
7086 */
7087 switch (event) {
7088 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7089 {
7090 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
7091 uint8_t num_entries, expstatus, phy;
7092 uint8_t phystatus, physport, state, i;
7093 uint8_t start_phy_num, link_rate;
7094 uint16_t dev_handle, reason_code;
7095 uint16_t enc_handle, expd_handle;
7096 char string[80], curr[80], prev[80];
7097 mptsas_topo_change_list_t *topo_head = NULL;
7098 mptsas_topo_change_list_t *topo_tail = NULL;
7099 mptsas_topo_change_list_t *topo_node = NULL;
7100 mptsas_target_t *ptgt;
7101 mptsas_smp_t *psmp;
7102 uint8_t flags = 0, exp_flag;
7103 smhba_info_t *pSmhba = NULL;
7104
7105 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
7106
7107 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
7108 eventreply->EventData;
7109
7110 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7111 &sas_topo_change_list->EnclosureHandle);
7112 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7113 &sas_topo_change_list->ExpanderDevHandle);
7114 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7115 &sas_topo_change_list->NumEntries);
7116 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7117 &sas_topo_change_list->StartPhyNum);
7118 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
7119 &sas_topo_change_list->ExpStatus);
7120 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
7121 &sas_topo_change_list->PhysicalPort);
7122
7123 string[0] = 0;
7124 if (expd_handle) {
7125 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
7126 switch (expstatus) {
7127 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7128 (void) sprintf(string, " added");
7129 /*
7130 * New expander device added
7131 */
7132 mpt->m_port_chng = 1;
7133 topo_node = kmem_zalloc(
7134 sizeof (mptsas_topo_change_list_t),
7135 KM_SLEEP);
7136 topo_node->mpt = mpt;
7137 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
7138 topo_node->un.physport = physport;
7139 topo_node->devhdl = expd_handle;
7140 topo_node->flags = flags;
7141 topo_node->object = NULL;
7142 if (topo_head == NULL) {
7143 topo_head = topo_tail = topo_node;
7144 } else {
7145 topo_tail->next = topo_node;
7146 topo_tail = topo_node;
7147 }
7148 break;
7149 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7150 (void) sprintf(string, " not responding, "
7151 "removed");
7152 psmp = refhash_linear_search(mpt->m_smp_targets,
7153 mptsas_smp_eval_devhdl, &expd_handle);
7154 if (psmp == NULL)
7155 break;
7156
7157 topo_node = kmem_zalloc(
7158 sizeof (mptsas_topo_change_list_t),
7159 KM_SLEEP);
7160 topo_node->mpt = mpt;
7161 topo_node->un.phymask =
7162 psmp->m_addr.mta_phymask;
7163 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
7164 topo_node->devhdl = expd_handle;
7165 topo_node->flags = flags;
7166 topo_node->object = NULL;
7167 if (topo_head == NULL) {
7168 topo_head = topo_tail = topo_node;
7169 } else {
7170 topo_tail->next = topo_node;
7171 topo_tail = topo_node;
7172 }
7173 break;
7174 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7175 break;
7176 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7177 (void) sprintf(string, " not responding, "
7178 "delaying removal");
7179 break;
7180 default:
7181 break;
7182 }
7183 } else {
7184 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
7185 }
7186
7187 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
7188 enc_handle, expd_handle, string));
7189 for (i = 0; i < num_entries; i++) {
7190 phy = i + start_phy_num;
7191 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
7192 &sas_topo_change_list->PHY[i].PhyStatus);
7193 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7194 &sas_topo_change_list->PHY[i].AttachedDevHandle);
7195 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
7196 /*
7197 * Filter out processing of Phy Vacant Status unless
7198 * the reason code is "Not Responding". Process all
7199 * other combinations of Phy Status and Reason Codes.
7200 */
7201 if ((phystatus &
7202 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
7203 (reason_code !=
7204 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
7205 continue;
7206 }
7207 curr[0] = 0;
7208 prev[0] = 0;
7209 string[0] = 0;
7210 switch (reason_code) {
7211 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7212 {
7213 NDBG20(("mptsas%d phy %d physical_port %d "
7214 "dev_handle %d added", mpt->m_instance, phy,
7215 physport, dev_handle));
7216 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7217 &sas_topo_change_list->PHY[i].LinkRate);
7218 state = (link_rate &
7219 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7220 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7221 switch (state) {
7222 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7223 (void) sprintf(curr, "is disabled");
7224 break;
7225 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7226 (void) sprintf(curr, "is offline, "
7227 "failed speed negotiation");
7228 break;
7229 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7230 (void) sprintf(curr, "SATA OOB "
7231 "complete");
7232 break;
7233 case SMP_RESET_IN_PROGRESS:
7234 (void) sprintf(curr, "SMP reset in "
7235 "progress");
7236 break;
7237 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7238 (void) sprintf(curr, "is online at "
7239 "1.5 Gbps");
7240 break;
7241 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7242 (void) sprintf(curr, "is online at 3.0 "
7243 "Gbps");
7244 break;
7245 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7246 (void) sprintf(curr, "is online at 6.0 "
7247 "Gbps");
7248 break;
7249 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7250 (void) sprintf(curr,
7251 "is online at 12.0 Gbps");
7252 break;
7253 default:
7254 (void) sprintf(curr, "state is "
7255 "unknown");
7256 break;
7257 }
7258 /*
7259 * New target device added into the system.
7260 * Set association flag according to if an
7261 * expander is used or not.
7262 */
7263 exp_flag =
7264 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7265 if (flags ==
7266 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7267 flags = exp_flag;
7268 }
7269 topo_node = kmem_zalloc(
7270 sizeof (mptsas_topo_change_list_t),
7271 KM_SLEEP);
7272 topo_node->mpt = mpt;
7273 topo_node->event =
7274 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7275 if (expd_handle == 0) {
7276 /*
7277 * Per MPI 2, if expander dev handle
7278 * is 0, it's a directly attached
7279 * device. So driver use PHY to decide
7280 * which iport is associated
7281 */
7282 physport = phy;
7283 mpt->m_port_chng = 1;
7284 }
7285 topo_node->un.physport = physport;
7286 topo_node->devhdl = dev_handle;
7287 topo_node->flags = flags;
7288 topo_node->object = NULL;
7289 if (topo_head == NULL) {
7290 topo_head = topo_tail = topo_node;
7291 } else {
7292 topo_tail->next = topo_node;
7293 topo_tail = topo_node;
7294 }
7295 break;
7296 }
7297 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7298 {
7299 NDBG20(("mptsas%d phy %d physical_port %d "
7300 "dev_handle %d removed", mpt->m_instance,
7301 phy, physport, dev_handle));
7302 /*
7303 * Set association flag according to if an
7304 * expander is used or not.
7305 */
7306 exp_flag =
7307 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7308 if (flags ==
7309 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7310 flags = exp_flag;
7311 }
7312 /*
7313 * Target device is removed from the system
7314 * Before the device is really offline from
7315 * from system.
7316 */
7317 ptgt = refhash_linear_search(mpt->m_targets,
7318 mptsas_target_eval_devhdl, &dev_handle);
7319 /*
7320 * If ptgt is NULL here, it means that the
7321 * DevHandle is not in the hash table. This is
7322 * reasonable sometimes. For example, if a
7323 * disk was pulled, then added, then pulled
7324 * again, the disk will not have been put into
7325 * the hash table because the add event will
7326 * have an invalid phymask. BUT, this does not
7327 * mean that the DevHandle is invalid. The
7328 * controller will still have a valid DevHandle
7329 * that must be removed. To do this, use the
7330 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
7331 */
7332 if (ptgt == NULL) {
7333 topo_node = kmem_zalloc(
7334 sizeof (mptsas_topo_change_list_t),
7335 KM_SLEEP);
7336 topo_node->mpt = mpt;
7337 topo_node->un.phymask = 0;
7338 topo_node->event =
7339 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7340 topo_node->devhdl = dev_handle;
7341 topo_node->flags = flags;
7342 topo_node->object = NULL;
7343 if (topo_head == NULL) {
7344 topo_head = topo_tail =
7345 topo_node;
7346 } else {
7347 topo_tail->next = topo_node;
7348 topo_tail = topo_node;
7349 }
7350 break;
7351 }
7352
7353 /*
7354 * Update DR flag immediately avoid I/O failure
7355 * before failover finish. Pay attention to the
7356 * mutex protect, we need grab m_tx_waitq_mutex
7357 * during set m_dr_flag because we won't add
7358 * the following command into waitq, instead,
7359 * we need return TRAN_BUSY in the tran_start
7360 * context.
7361 */
7362 mutex_enter(&mpt->m_tx_waitq_mutex);
7363 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7364 mutex_exit(&mpt->m_tx_waitq_mutex);
7365
7366 topo_node = kmem_zalloc(
7367 sizeof (mptsas_topo_change_list_t),
7368 KM_SLEEP);
7369 topo_node->mpt = mpt;
7370 topo_node->un.phymask =
7371 ptgt->m_addr.mta_phymask;
7372 topo_node->event =
7373 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7374 topo_node->devhdl = dev_handle;
7375 topo_node->flags = flags;
7376 topo_node->object = NULL;
7377 if (topo_head == NULL) {
7378 topo_head = topo_tail = topo_node;
7379 } else {
7380 topo_tail->next = topo_node;
7381 topo_tail = topo_node;
7382 }
7383 break;
7384 }
7385 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7386 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7387 &sas_topo_change_list->PHY[i].LinkRate);
7388 state = (link_rate &
7389 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7390 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7391 pSmhba = &mpt->m_phy_info[i].smhba_info;
7392 pSmhba->negotiated_link_rate = state;
7393 switch (state) {
7394 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7395 (void) sprintf(curr, "is disabled");
7396 mptsas_smhba_log_sysevent(mpt,
7397 ESC_SAS_PHY_EVENT,
7398 SAS_PHY_REMOVE,
7399 &mpt->m_phy_info[i].smhba_info);
7400 mpt->m_phy_info[i].smhba_info.
7401 negotiated_link_rate
7402 = 0x1;
7403 break;
7404 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7405 (void) sprintf(curr, "is offline, "
7406 "failed speed negotiation");
7407 mptsas_smhba_log_sysevent(mpt,
7408 ESC_SAS_PHY_EVENT,
7409 SAS_PHY_OFFLINE,
7410 &mpt->m_phy_info[i].smhba_info);
7411 break;
7412 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7413 (void) sprintf(curr, "SATA OOB "
7414 "complete");
7415 break;
7416 case SMP_RESET_IN_PROGRESS:
7417 (void) sprintf(curr, "SMP reset in "
7418 "progress");
7419 break;
7420 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7421 (void) sprintf(curr, "is online at "
7422 "1.5 Gbps");
7423 if ((expd_handle == 0) &&
7424 (enc_handle == 1)) {
7425 mpt->m_port_chng = 1;
7426 }
7427 mptsas_smhba_log_sysevent(mpt,
7428 ESC_SAS_PHY_EVENT,
7429 SAS_PHY_ONLINE,
7430 &mpt->m_phy_info[i].smhba_info);
7431 break;
7432 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7433 (void) sprintf(curr, "is online at 3.0 "
7434 "Gbps");
7435 if ((expd_handle == 0) &&
7436 (enc_handle == 1)) {
7437 mpt->m_port_chng = 1;
7438 }
7439 mptsas_smhba_log_sysevent(mpt,
7440 ESC_SAS_PHY_EVENT,
7441 SAS_PHY_ONLINE,
7442 &mpt->m_phy_info[i].smhba_info);
7443 break;
7444 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7445 (void) sprintf(curr, "is online at "
7446 "6.0 Gbps");
7447 if ((expd_handle == 0) &&
7448 (enc_handle == 1)) {
7449 mpt->m_port_chng = 1;
7450 }
7451 mptsas_smhba_log_sysevent(mpt,
7452 ESC_SAS_PHY_EVENT,
7453 SAS_PHY_ONLINE,
7454 &mpt->m_phy_info[i].smhba_info);
7455 break;
7456 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7457 (void) sprintf(curr, "is online at "
7458 "12.0 Gbps");
7459 if ((expd_handle == 0) &&
7460 (enc_handle == 1)) {
7461 mpt->m_port_chng = 1;
7462 }
7463 mptsas_smhba_log_sysevent(mpt,
7464 ESC_SAS_PHY_EVENT,
7465 SAS_PHY_ONLINE,
7466 &mpt->m_phy_info[i].smhba_info);
7467 break;
7468 default:
7469 (void) sprintf(curr, "state is "
7470 "unknown");
7471 break;
7472 }
7473
7474 state = (link_rate &
7475 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
7476 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
7477 switch (state) {
7478 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7479 (void) sprintf(prev, ", was disabled");
7480 break;
7481 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7482 (void) sprintf(prev, ", was offline, "
7483 "failed speed negotiation");
7484 break;
7485 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7486 (void) sprintf(prev, ", was SATA OOB "
7487 "complete");
7488 break;
7489 case SMP_RESET_IN_PROGRESS:
7490 (void) sprintf(prev, ", was SMP reset "
7491 "in progress");
7492 break;
7493 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7494 (void) sprintf(prev, ", was online at "
7495 "1.5 Gbps");
7496 break;
7497 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7498 (void) sprintf(prev, ", was online at "
7499 "3.0 Gbps");
7500 break;
7501 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7502 (void) sprintf(prev, ", was online at "
7503 "6.0 Gbps");
7504 break;
7505 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7506 (void) sprintf(prev, ", was online at "
7507 "12.0 Gbps");
7508 break;
7509 default:
7510 break;
7511 }
7512 (void) sprintf(&string[strlen(string)], "link "
7513 "changed, ");
7514 break;
7515 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7516 continue;
7517 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7518 (void) sprintf(&string[strlen(string)],
7519 "target not responding, delaying "
7520 "removal");
7521 break;
7522 }
7523 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7524 mpt->m_instance, phy, dev_handle, string, curr,
7525 prev));
7526 }
7527 if (topo_head != NULL) {
7528 /*
7529 * Launch DR taskq to handle topology change
7530 */
7531 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7532 mptsas_handle_dr, (void *)topo_head,
7533 DDI_NOSLEEP)) != DDI_SUCCESS) {
7534 while (topo_head != NULL) {
7535 topo_node = topo_head;
7536 topo_head = topo_head->next;
7537 kmem_free(topo_node,
7538 sizeof (mptsas_topo_change_list_t));
7539 }
7540 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7541 "for handle SAS DR event failed. \n");
7542 }
7543 }
7544 break;
7545 }
7546 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7547 {
7548 Mpi2EventDataIrConfigChangeList_t *irChangeList;
7549 mptsas_topo_change_list_t *topo_head = NULL;
7550 mptsas_topo_change_list_t *topo_tail = NULL;
7551 mptsas_topo_change_list_t *topo_node = NULL;
7552 mptsas_target_t *ptgt;
7553 uint8_t num_entries, i, reason;
7554 uint16_t volhandle, diskhandle;
7555
7556 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7557 eventreply->EventData;
7558 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7559 &irChangeList->NumElements);
7560
7561 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7562 mpt->m_instance));
7563
7564 for (i = 0; i < num_entries; i++) {
7565 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7566 &irChangeList->ConfigElement[i].ReasonCode);
7567 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7568 &irChangeList->ConfigElement[i].VolDevHandle);
7569 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7570 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7571
7572 switch (reason) {
7573 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7574 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7575 {
7576 NDBG20(("mptsas %d volume added\n",
7577 mpt->m_instance));
7578
7579 topo_node = kmem_zalloc(
7580 sizeof (mptsas_topo_change_list_t),
7581 KM_SLEEP);
7582
7583 topo_node->mpt = mpt;
7584 topo_node->event =
7585 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7586 topo_node->un.physport = 0xff;
7587 topo_node->devhdl = volhandle;
7588 topo_node->flags =
7589 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7590 topo_node->object = NULL;
7591 if (topo_head == NULL) {
7592 topo_head = topo_tail = topo_node;
7593 } else {
7594 topo_tail->next = topo_node;
7595 topo_tail = topo_node;
7596 }
7597 break;
7598 }
7599 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7600 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7601 {
7602 NDBG20(("mptsas %d volume deleted\n",
7603 mpt->m_instance));
7604 ptgt = refhash_linear_search(mpt->m_targets,
7605 mptsas_target_eval_devhdl, &volhandle);
7606 if (ptgt == NULL)
7607 break;
7608
7609 /*
7610 * Clear any flags related to volume
7611 */
7612 (void) mptsas_delete_volume(mpt, volhandle);
7613
7614 /*
7615 * Update DR flag immediately avoid I/O failure
7616 */
7617 mutex_enter(&mpt->m_tx_waitq_mutex);
7618 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7619 mutex_exit(&mpt->m_tx_waitq_mutex);
7620
7621 topo_node = kmem_zalloc(
7622 sizeof (mptsas_topo_change_list_t),
7623 KM_SLEEP);
7624 topo_node->mpt = mpt;
7625 topo_node->un.phymask =
7626 ptgt->m_addr.mta_phymask;
7627 topo_node->event =
7628 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7629 topo_node->devhdl = volhandle;
7630 topo_node->flags =
7631 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7632 topo_node->object = (void *)ptgt;
7633 if (topo_head == NULL) {
7634 topo_head = topo_tail = topo_node;
7635 } else {
7636 topo_tail->next = topo_node;
7637 topo_tail = topo_node;
7638 }
7639 break;
7640 }
7641 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7642 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7643 {
7644 ptgt = refhash_linear_search(mpt->m_targets,
7645 mptsas_target_eval_devhdl, &diskhandle);
7646 if (ptgt == NULL)
7647 break;
7648
7649 /*
7650 * Update DR flag immediately avoid I/O failure
7651 */
7652 mutex_enter(&mpt->m_tx_waitq_mutex);
7653 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7654 mutex_exit(&mpt->m_tx_waitq_mutex);
7655
7656 topo_node = kmem_zalloc(
7657 sizeof (mptsas_topo_change_list_t),
7658 KM_SLEEP);
7659 topo_node->mpt = mpt;
7660 topo_node->un.phymask =
7661 ptgt->m_addr.mta_phymask;
7662 topo_node->event =
7663 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7664 topo_node->devhdl = diskhandle;
7665 topo_node->flags =
7666 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7667 topo_node->object = (void *)ptgt;
7668 if (topo_head == NULL) {
7669 topo_head = topo_tail = topo_node;
7670 } else {
7671 topo_tail->next = topo_node;
7672 topo_tail = topo_node;
7673 }
7674 break;
7675 }
7676 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7677 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7678 {
7679 /*
7680 * The physical drive is released by a IR
7681 * volume. But we cannot get the the physport
7682 * or phynum from the event data, so we only
7683 * can get the physport/phynum after SAS
7684 * Device Page0 request for the devhdl.
7685 */
7686 topo_node = kmem_zalloc(
7687 sizeof (mptsas_topo_change_list_t),
7688 KM_SLEEP);
7689 topo_node->mpt = mpt;
7690 topo_node->un.phymask = 0;
7691 topo_node->event =
7692 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7693 topo_node->devhdl = diskhandle;
7694 topo_node->flags =
7695 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7696 topo_node->object = NULL;
7697 mpt->m_port_chng = 1;
7698 if (topo_head == NULL) {
7699 topo_head = topo_tail = topo_node;
7700 } else {
7701 topo_tail->next = topo_node;
7702 topo_tail = topo_node;
7703 }
7704 break;
7705 }
7706 default:
7707 break;
7708 }
7709 }
7710
7711 if (topo_head != NULL) {
7712 /*
7713 * Launch DR taskq to handle topology change
7714 */
7715 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7716 mptsas_handle_dr, (void *)topo_head,
7717 DDI_NOSLEEP)) != DDI_SUCCESS) {
7718 while (topo_head != NULL) {
7719 topo_node = topo_head;
7720 topo_head = topo_head->next;
7721 kmem_free(topo_node,
7722 sizeof (mptsas_topo_change_list_t));
7723 }
7724 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7725 "for handle SAS DR event failed. \n");
7726 }
7727 }
7728 break;
7729 }
7730 default:
7731 return (DDI_FAILURE);
7732 }
7733
7734 return (DDI_SUCCESS);
7735 }
7736
7737 /*
7738 * handle events from ioc
7739 */
7740 static void
7741 mptsas_handle_event(void *args)
7742 {
7743 m_replyh_arg_t *replyh_arg;
7744 pMpi2EventNotificationReply_t eventreply;
7745 uint32_t event, iocloginfo, rfm;
7746 uint32_t status;
7747 uint8_t port;
7748 mptsas_t *mpt;
7749 uint_t iocstatus;
7750
7751 replyh_arg = (m_replyh_arg_t *)args;
7752 rfm = replyh_arg->rfm;
7753 mpt = replyh_arg->mpt;
7754
7755 mutex_enter(&mpt->m_mutex);
7756 /*
7757 * If HBA is being reset, drop incoming event.
7758 */
7759 if (mpt->m_in_reset) {
7760 NDBG20(("dropping event received prior to reset"));
7761 mutex_exit(&mpt->m_mutex);
7762 return;
7763 }
7764
7765 eventreply = (pMpi2EventNotificationReply_t)
7766 (mpt->m_reply_frame + (rfm -
7767 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7768 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7769
7770 if ((iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7771 &eventreply->IOCStatus)) != 0) {
7772 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7773 mptsas_log(mpt, CE_WARN,
7774 "!mptsas_handle_event: IOCStatus=0x%x, "
7775 "IOCLogInfo=0x%x", iocstatus,
7776 ddi_get32(mpt->m_acc_reply_frame_hdl,
7777 &eventreply->IOCLogInfo));
7778 } else {
7779 mptsas_log(mpt, CE_WARN,
7780 "mptsas_handle_event: IOCStatus=0x%x, "
7781 "IOCLogInfo=0x%x", iocstatus,
7782 ddi_get32(mpt->m_acc_reply_frame_hdl,
7783 &eventreply->IOCLogInfo));
7784 }
7785 }
7786
7787 /*
7788 * figure out what kind of event we got and handle accordingly
7789 */
7790 switch (event) {
7791 case MPI2_EVENT_LOG_ENTRY_ADDED:
7792 break;
7793 case MPI2_EVENT_LOG_DATA:
7794 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7795 &eventreply->IOCLogInfo);
7796 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7797 iocloginfo));
7798 break;
7799 case MPI2_EVENT_STATE_CHANGE:
7800 NDBG20(("mptsas%d state change.", mpt->m_instance));
7801 break;
7802 case MPI2_EVENT_HARD_RESET_RECEIVED:
7803 NDBG20(("mptsas%d event change.", mpt->m_instance));
7804 break;
7805 case MPI2_EVENT_SAS_DISCOVERY:
7806 {
7807 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7808 char string[80];
7809 uint8_t rc;
7810
7811 sasdiscovery =
7812 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7813
7814 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7815 &sasdiscovery->ReasonCode);
7816 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7817 &sasdiscovery->PhysicalPort);
7818 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7819 &sasdiscovery->DiscoveryStatus);
7820
7821 string[0] = 0;
7822 switch (rc) {
7823 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7824 (void) sprintf(string, "STARTING");
7825 break;
7826 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7827 (void) sprintf(string, "COMPLETED");
7828 break;
7829 default:
7830 (void) sprintf(string, "UNKNOWN");
7831 break;
7832 }
7833
7834 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7835 port, status));
7836
7837 break;
7838 }
7839 case MPI2_EVENT_EVENT_CHANGE:
7840 NDBG20(("mptsas%d event change.", mpt->m_instance));
7841 break;
7842 case MPI2_EVENT_TASK_SET_FULL:
7843 {
7844 pMpi2EventDataTaskSetFull_t taskfull;
7845
7846 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7847
7848 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7849 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7850 &taskfull->CurrentDepth)));
7851 break;
7852 }
7853 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7854 {
7855 /*
7856 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7857 * in mptsas_handle_event_sync() of interrupt context
7858 */
7859 break;
7860 }
7861 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7862 {
7863 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7864 uint8_t rc;
7865 uint16_t enchdl;
7866 char string[80];
7867 mptsas_enclosure_t *mep;
7868
7869 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7870 eventreply->EventData;
7871
7872 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7873 &encstatus->ReasonCode);
7874 enchdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7875 &encstatus->EnclosureHandle);
7876
7877 switch (rc) {
7878 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7879 (void) sprintf(string, "added");
7880 break;
7881 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7882 mep = mptsas_enc_lookup(mpt, enchdl);
7883 if (mep != NULL) {
7884 list_remove(&mpt->m_enclosures, mep);
7885 mptsas_enc_free(mep);
7886 mep = NULL;
7887 }
7888 (void) sprintf(string, ", not responding");
7889 break;
7890 default:
7891 break;
7892 }
7893 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure "
7894 "%x%s\n", mpt->m_instance,
7895 ddi_get16(mpt->m_acc_reply_frame_hdl,
7896 &encstatus->EnclosureHandle), string));
7897
7898 /*
7899 * No matter what has happened, update all of our device state
7900 * for enclosures, by retriggering an evaluation.
7901 */
7902 mpt->m_done_traverse_enc = 0;
7903 mptsas_update_hashtab(mpt);
7904 break;
7905 }
7906
7907 /*
7908 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7909 * mptsas_handle_event_sync,in here just send ack message.
7910 */
7911 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7912 {
7913 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7914 uint8_t rc;
7915 uint16_t devhdl;
7916 uint64_t wwn = 0;
7917 uint32_t wwn_lo, wwn_hi;
7918
7919 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7920 eventreply->EventData;
7921 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7922 &statuschange->ReasonCode);
7923 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7924 (uint32_t *)(void *)&statuschange->SASAddress);
7925 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7926 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7927 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7928 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7929 &statuschange->DevHandle);
7930
7931 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7932 wwn));
7933
7934 switch (rc) {
7935 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7936 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7937 ddi_get8(mpt->m_acc_reply_frame_hdl,
7938 &statuschange->ASC),
7939 ddi_get8(mpt->m_acc_reply_frame_hdl,
7940 &statuschange->ASCQ)));
7941 break;
7942
7943 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7944 NDBG20(("Device not supported"));
7945 break;
7946
7947 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7948 NDBG20(("IOC internally generated the Target Reset "
7949 "for devhdl:%x", devhdl));
7950 break;
7951
7952 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7953 NDBG20(("IOC's internally generated Target Reset "
7954 "completed for devhdl:%x", devhdl));
7955 break;
7956
7957 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7958 NDBG20(("IOC internally generated Abort Task"));
7959 break;
7960
7961 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7962 NDBG20(("IOC's internally generated Abort Task "
7963 "completed"));
7964 break;
7965
7966 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7967 NDBG20(("IOC internally generated Abort Task Set"));
7968 break;
7969
7970 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7971 NDBG20(("IOC internally generated Clear Task Set"));
7972 break;
7973
7974 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7975 NDBG20(("IOC internally generated Query Task"));
7976 break;
7977
7978 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7979 NDBG20(("Device sent an Asynchronous Notification"));
7980 break;
7981
7982 default:
7983 break;
7984 }
7985 break;
7986 }
7987 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7988 {
7989 /*
7990 * IR TOPOLOGY CHANGE LIST Event has already been handled
7991 * in mpt_handle_event_sync() of interrupt context
7992 */
7993 break;
7994 }
7995 case MPI2_EVENT_IR_OPERATION_STATUS:
7996 {
7997 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7998 char reason_str[80];
7999 uint8_t rc, percent;
8000 uint16_t handle;
8001
8002 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
8003 eventreply->EventData;
8004 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
8005 &irOpStatus->RAIDOperation);
8006 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
8007 &irOpStatus->PercentComplete);
8008 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8009 &irOpStatus->VolDevHandle);
8010
8011 switch (rc) {
8012 case MPI2_EVENT_IR_RAIDOP_RESYNC:
8013 (void) sprintf(reason_str, "resync");
8014 break;
8015 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8016 (void) sprintf(reason_str, "online capacity "
8017 "expansion");
8018 break;
8019 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8020 (void) sprintf(reason_str, "consistency check");
8021 break;
8022 default:
8023 (void) sprintf(reason_str, "unknown reason %x",
8024 rc);
8025 }
8026
8027 NDBG20(("mptsas%d raid operational status: (%s)"
8028 "\thandle(0x%04x), percent complete(%d)\n",
8029 mpt->m_instance, reason_str, handle, percent));
8030 break;
8031 }
8032 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
8033 {
8034 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
8035 uint8_t phy_num;
8036 uint8_t primitive;
8037
8038 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
8039 eventreply->EventData;
8040
8041 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
8042 &sas_broadcast->PhyNum);
8043 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
8044 &sas_broadcast->Primitive);
8045
8046 switch (primitive) {
8047 case MPI2_EVENT_PRIMITIVE_CHANGE:
8048 mptsas_smhba_log_sysevent(mpt,
8049 ESC_SAS_HBA_PORT_BROADCAST,
8050 SAS_PORT_BROADCAST_CHANGE,
8051 &mpt->m_phy_info[phy_num].smhba_info);
8052 break;
8053 case MPI2_EVENT_PRIMITIVE_SES:
8054 mptsas_smhba_log_sysevent(mpt,
8055 ESC_SAS_HBA_PORT_BROADCAST,
8056 SAS_PORT_BROADCAST_SES,
8057 &mpt->m_phy_info[phy_num].smhba_info);
8058 break;
8059 case MPI2_EVENT_PRIMITIVE_EXPANDER:
8060 mptsas_smhba_log_sysevent(mpt,
8061 ESC_SAS_HBA_PORT_BROADCAST,
8062 SAS_PORT_BROADCAST_D01_4,
8063 &mpt->m_phy_info[phy_num].smhba_info);
8064 break;
8065 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
8066 mptsas_smhba_log_sysevent(mpt,
8067 ESC_SAS_HBA_PORT_BROADCAST,
8068 SAS_PORT_BROADCAST_D04_7,
8069 &mpt->m_phy_info[phy_num].smhba_info);
8070 break;
8071 case MPI2_EVENT_PRIMITIVE_RESERVED3:
8072 mptsas_smhba_log_sysevent(mpt,
8073 ESC_SAS_HBA_PORT_BROADCAST,
8074 SAS_PORT_BROADCAST_D16_7,
8075 &mpt->m_phy_info[phy_num].smhba_info);
8076 break;
8077 case MPI2_EVENT_PRIMITIVE_RESERVED4:
8078 mptsas_smhba_log_sysevent(mpt,
8079 ESC_SAS_HBA_PORT_BROADCAST,
8080 SAS_PORT_BROADCAST_D29_7,
8081 &mpt->m_phy_info[phy_num].smhba_info);
8082 break;
8083 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
8084 mptsas_smhba_log_sysevent(mpt,
8085 ESC_SAS_HBA_PORT_BROADCAST,
8086 SAS_PORT_BROADCAST_D24_0,
8087 &mpt->m_phy_info[phy_num].smhba_info);
8088 break;
8089 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
8090 mptsas_smhba_log_sysevent(mpt,
8091 ESC_SAS_HBA_PORT_BROADCAST,
8092 SAS_PORT_BROADCAST_D27_4,
8093 &mpt->m_phy_info[phy_num].smhba_info);
8094 break;
8095 default:
8096 NDBG16(("mptsas%d: unknown BROADCAST PRIMITIVE"
8097 " %x received",
8098 mpt->m_instance, primitive));
8099 break;
8100 }
8101 NDBG16(("mptsas%d sas broadcast primitive: "
8102 "\tprimitive(0x%04x), phy(%d) complete\n",
8103 mpt->m_instance, primitive, phy_num));
8104 break;
8105 }
8106 case MPI2_EVENT_IR_VOLUME:
8107 {
8108 Mpi2EventDataIrVolume_t *irVolume;
8109 uint16_t devhandle;
8110 uint32_t state;
8111 int config, vol;
8112 uint8_t found = FALSE;
8113
8114 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
8115 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
8116 &irVolume->NewValue);
8117 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8118 &irVolume->VolDevHandle);
8119
8120 NDBG20(("EVENT_IR_VOLUME event is received"));
8121
8122 /*
8123 * Get latest RAID info and then find the DevHandle for this
8124 * event in the configuration. If the DevHandle is not found
8125 * just exit the event.
8126 */
8127 (void) mptsas_get_raid_info(mpt);
8128 for (config = 0; (config < mpt->m_num_raid_configs) &&
8129 (!found); config++) {
8130 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
8131 if (mpt->m_raidconfig[config].m_raidvol[vol].
8132 m_raidhandle == devhandle) {
8133 found = TRUE;
8134 break;
8135 }
8136 }
8137 }
8138 if (!found) {
8139 break;
8140 }
8141
8142 switch (irVolume->ReasonCode) {
8143 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
8144 {
8145 uint32_t i;
8146 mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
8147 state;
8148
8149 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
8150 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
8151 ", auto-config of hot-swap drives is %s"
8152 ", write caching is %s"
8153 ", hot-spare pool mask is %02x\n",
8154 vol, state &
8155 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
8156 ? "disabled" : "enabled",
8157 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
8158 ? "controlled by member disks" :
8159 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
8160 ? "disabled" :
8161 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
8162 ? "enabled" :
8163 "incorrectly set",
8164 (state >> 16) & 0xff);
8165 break;
8166 }
8167 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
8168 {
8169 mpt->m_raidconfig[config].m_raidvol[vol].m_state =
8170 (uint8_t)state;
8171
8172 mptsas_log(mpt, CE_NOTE,
8173 "Volume %d is now %s\n", vol,
8174 state == MPI2_RAID_VOL_STATE_OPTIMAL
8175 ? "optimal" :
8176 state == MPI2_RAID_VOL_STATE_DEGRADED
8177 ? "degraded" :
8178 state == MPI2_RAID_VOL_STATE_ONLINE
8179 ? "online" :
8180 state == MPI2_RAID_VOL_STATE_INITIALIZING
8181 ? "initializing" :
8182 state == MPI2_RAID_VOL_STATE_FAILED
8183 ? "failed" :
8184 state == MPI2_RAID_VOL_STATE_MISSING
8185 ? "missing" :
8186 "state unknown");
8187 break;
8188 }
8189 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
8190 {
8191 mpt->m_raidconfig[config].m_raidvol[vol].
8192 m_statusflags = state;
8193
8194 mptsas_log(mpt, CE_NOTE,
8195 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
8196 vol,
8197 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
8198 ? ", enabled" : ", disabled",
8199 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
8200 ? ", quiesced" : "",
8201 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
8202 ? ", inactive" : ", active",
8203 state &
8204 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
8205 ? ", bad block table is full" : "",
8206 state &
8207 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
8208 ? ", resync in progress" : "",
8209 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
8210 ? ", background initialization in progress" : "",
8211 state &
8212 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
8213 ? ", capacity expansion in progress" : "",
8214 state &
8215 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
8216 ? ", consistency check in progress" : "",
8217 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
8218 ? ", data scrub in progress" : "");
8219 break;
8220 }
8221 default:
8222 break;
8223 }
8224 break;
8225 }
8226 case MPI2_EVENT_IR_PHYSICAL_DISK:
8227 {
8228 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
8229 uint16_t devhandle, enchandle, slot;
8230 uint32_t status, state;
8231 uint8_t physdisknum, reason;
8232
8233 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
8234 eventreply->EventData;
8235 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
8236 &irPhysDisk->PhysDiskNum);
8237 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8238 &irPhysDisk->PhysDiskDevHandle);
8239 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8240 &irPhysDisk->EnclosureHandle);
8241 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
8242 &irPhysDisk->Slot);
8243 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
8244 &irPhysDisk->NewValue);
8245 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8246 &irPhysDisk->ReasonCode);
8247
8248 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
8249
8250 switch (reason) {
8251 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
8252 mptsas_log(mpt, CE_NOTE,
8253 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8254 "for enclosure with handle 0x%x is now in hot "
8255 "spare pool %d",
8256 physdisknum, devhandle, slot, enchandle,
8257 (state >> 16) & 0xff);
8258 break;
8259
8260 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
8261 status = state;
8262 mptsas_log(mpt, CE_NOTE,
8263 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8264 "for enclosure with handle 0x%x is now "
8265 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
8266 enchandle,
8267 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
8268 ? ", inactive" : ", active",
8269 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
8270 ? ", out of sync" : "",
8271 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
8272 ? ", quiesced" : "",
8273 status &
8274 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
8275 ? ", write cache enabled" : "",
8276 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
8277 ? ", capacity expansion target" : "");
8278 break;
8279
8280 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
8281 mptsas_log(mpt, CE_NOTE,
8282 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8283 "for enclosure with handle 0x%x is now %s\n",
8284 physdisknum, devhandle, slot, enchandle,
8285 state == MPI2_RAID_PD_STATE_OPTIMAL
8286 ? "optimal" :
8287 state == MPI2_RAID_PD_STATE_REBUILDING
8288 ? "rebuilding" :
8289 state == MPI2_RAID_PD_STATE_DEGRADED
8290 ? "degraded" :
8291 state == MPI2_RAID_PD_STATE_HOT_SPARE
8292 ? "a hot spare" :
8293 state == MPI2_RAID_PD_STATE_ONLINE
8294 ? "online" :
8295 state == MPI2_RAID_PD_STATE_OFFLINE
8296 ? "offline" :
8297 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
8298 ? "not compatible" :
8299 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
8300 ? "not configured" :
8301 "state unknown");
8302 break;
8303 }
8304 break;
8305 }
8306 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
8307 {
8308 pMpi26EventDataActiveCableExcept_t actcable;
8309 uint32_t power;
8310 uint8_t reason, id;
8311
8312 actcable = (pMpi26EventDataActiveCableExcept_t)
8313 eventreply->EventData;
8314 power = ddi_get32(mpt->m_acc_reply_frame_hdl,
8315 &actcable->ActiveCablePowerRequirement);
8316 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8317 &actcable->ReasonCode);
8318 id = ddi_get8(mpt->m_acc_reply_frame_hdl,
8319 &actcable->ReceptacleID);
8320
8321 /*
8322 * It'd be nice if this weren't just logging to the system but
8323 * were telling FMA about the active cable problem and FMA was
8324 * aware of the cable topology and state.
8325 */
8326 switch (reason) {
8327 case MPI26_EVENT_ACTIVE_CABLE_PRESENT:
8328 /* Don't log anything if it's fine */
8329 break;
8330 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
8331 mptsas_log(mpt, CE_WARN, "An active cable (id %u) does "
8332 "not have sufficient power to be enabled. "
8333 "Devices connected to this cable will not be "
8334 "visible to the system.", id);
8335 if (power == UINT32_MAX) {
8336 mptsas_log(mpt, CE_CONT, "The cable's power "
8337 "requirements are unknown.\n");
8338 } else {
8339 mptsas_log(mpt, CE_CONT, "The cable requires "
8340 "%u mW of power to function.\n", power);
8341 }
8342 break;
8343 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
8344 mptsas_log(mpt, CE_WARN, "An active cable (id %u) is "
8345 "degraded and not running at its full speed. "
8346 "Some devices might not appear.", id);
8347 break;
8348 default:
8349 break;
8350 }
8351 break;
8352 }
8353 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
8354 case MPI2_EVENT_PCIE_ENUMERATION:
8355 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
8356 case MPI2_EVENT_PCIE_LINK_COUNTER:
8357 mptsas_log(mpt, CE_NOTE, "Unhandled mpt_sas PCIe device "
8358 "event received (0x%x)", event);
8359 break;
8360 default:
8361 NDBG20(("mptsas%d: unknown event %x received",
8362 mpt->m_instance, event));
8363 break;
8364 }
8365
8366 /*
8367 * Return the reply frame to the free queue.
8368 */
8369 ddi_put32(mpt->m_acc_free_queue_hdl,
8370 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
8371 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
8372 DDI_DMA_SYNC_FORDEV);
8373 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
8374 mpt->m_free_index = 0;
8375 }
8376 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
8377 mpt->m_free_index);
8378 mutex_exit(&mpt->m_mutex);
8379 }
8380
8381 /*
8382 * invoked from timeout() to restart qfull cmds with throttle == 0
8383 */
8384 static void
8385 mptsas_restart_cmd(void *arg)
8386 {
8387 mptsas_t *mpt = arg;
8388 mptsas_target_t *ptgt = NULL;
8389
8390 mutex_enter(&mpt->m_mutex);
8391
8392 mpt->m_restart_cmd_timeid = 0;
8393
8394 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8395 ptgt = refhash_next(mpt->m_targets, ptgt)) {
8396 if (ptgt->m_reset_delay == 0) {
8397 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
8398 mptsas_set_throttle(mpt, ptgt,
8399 MAX_THROTTLE);
8400 }
8401 }
8402 }
8403 mptsas_restart_hba(mpt);
8404 mutex_exit(&mpt->m_mutex);
8405 }
8406
8407 void
8408 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8409 {
8410 int slot;
8411 mptsas_slots_t *slots = mpt->m_active;
8412 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8413
8414 ASSERT(cmd != NULL);
8415 ASSERT(cmd->cmd_queued == FALSE);
8416
8417 /*
8418 * Task Management cmds are removed in their own routines. Also,
8419 * we don't want to modify timeout based on TM cmds.
8420 */
8421 if (cmd->cmd_flags & CFLAG_TM_CMD) {
8422 return;
8423 }
8424
8425 slot = cmd->cmd_slot;
8426
8427 /*
8428 * remove the cmd.
8429 */
8430 if (cmd == slots->m_slot[slot]) {
8431 NDBG31(("mptsas_remove_cmd: removing cmd=0x%p, flags "
8432 "0x%x", (void *)cmd, cmd->cmd_flags));
8433 slots->m_slot[slot] = NULL;
8434 mpt->m_ncmds--;
8435
8436 /*
8437 * only decrement per target ncmds if command
8438 * has a target associated with it.
8439 */
8440 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
8441 ptgt->m_t_ncmds--;
8442 /*
8443 * reset throttle if we just ran an untagged command
8444 * to a tagged target
8445 */
8446 if ((ptgt->m_t_ncmds == 0) &&
8447 ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
8448 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8449 }
8450
8451 /*
8452 * Remove this command from the active queue.
8453 */
8454 if (cmd->cmd_active_expiration != 0) {
8455 TAILQ_REMOVE(&ptgt->m_active_cmdq, cmd,
8456 cmd_active_link);
8457 cmd->cmd_active_expiration = 0;
8458 }
8459 }
8460 }
8461
8462 /*
8463 * This is all we need to do for ioc commands.
8464 */
8465 if (cmd->cmd_flags & CFLAG_CMDIOC) {
8466 mptsas_return_to_pool(mpt, cmd);
8467 return;
8468 }
8469
8470 ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
8471 }
8472
8473 /*
8474 * accept all cmds on the tx_waitq if any and then
8475 * start a fresh request from the top of the device queue.
8476 *
8477 * since there are always cmds queued on the tx_waitq, and rare cmds on
8478 * the instance waitq, so this function should not be invoked in the ISR,
8479 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
8480 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
8481 */
8482 static void
8483 mptsas_restart_hba(mptsas_t *mpt)
8484 {
8485 ASSERT(mutex_owned(&mpt->m_mutex));
8486
8487 mutex_enter(&mpt->m_tx_waitq_mutex);
8488 if (mpt->m_tx_waitq) {
8489 mptsas_accept_tx_waitq(mpt);
8490 }
8491 mutex_exit(&mpt->m_tx_waitq_mutex);
8492 mptsas_restart_waitq(mpt);
8493 }
8494
8495 /*
8496 * start a fresh request from the top of the device queue
8497 */
8498 static void
8499 mptsas_restart_waitq(mptsas_t *mpt)
8500 {
8501 mptsas_cmd_t *cmd, *next_cmd;
8502 mptsas_target_t *ptgt = NULL;
8503
8504 NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
8505
8506 ASSERT(mutex_owned(&mpt->m_mutex));
8507
8508 /*
8509 * If there is a reset delay, don't start any cmds. Otherwise, start
8510 * as many cmds as possible.
8511 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
8512 * commands is m_max_requests - 2.
8513 */
8514 cmd = mpt->m_waitq;
8515
8516 while (cmd != NULL) {
8517 next_cmd = cmd->cmd_linkp;
8518 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
8519 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8520 /*
8521 * passthru command get slot need
8522 * set CFLAG_PREPARED.
8523 */
8524 cmd->cmd_flags |= CFLAG_PREPARED;
8525 mptsas_waitq_delete(mpt, cmd);
8526 mptsas_start_passthru(mpt, cmd);
8527 }
8528 cmd = next_cmd;
8529 continue;
8530 }
8531 if (cmd->cmd_flags & CFLAG_CONFIG) {
8532 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8533 /*
8534 * Send the config page request and delete it
8535 * from the waitq.
8536 */
8537 cmd->cmd_flags |= CFLAG_PREPARED;
8538 mptsas_waitq_delete(mpt, cmd);
8539 mptsas_start_config_page_access(mpt, cmd);
8540 }
8541 cmd = next_cmd;
8542 continue;
8543 }
8544 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
8545 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8546 /*
8547 * Send the FW Diag request and delete if from
8548 * the waitq.
8549 */
8550 cmd->cmd_flags |= CFLAG_PREPARED;
8551 mptsas_waitq_delete(mpt, cmd);
8552 mptsas_start_diag(mpt, cmd);
8553 }
8554 cmd = next_cmd;
8555 continue;
8556 }
8557
8558 ptgt = cmd->cmd_tgt_addr;
8559 if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
8560 (ptgt->m_t_ncmds == 0)) {
8561 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8562 }
8563 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
8564 (ptgt && (ptgt->m_reset_delay == 0)) &&
8565 (ptgt && (ptgt->m_t_ncmds <
8566 ptgt->m_t_throttle))) {
8567 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8568 mptsas_waitq_delete(mpt, cmd);
8569 (void) mptsas_start_cmd(mpt, cmd);
8570 }
8571 }
8572 cmd = next_cmd;
8573 }
8574 }
8575 /*
8576 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
8577 * Accept all those queued cmds before new cmd is accept so that the
8578 * cmds are sent in order.
8579 */
8580 static void
8581 mptsas_accept_tx_waitq(mptsas_t *mpt)
8582 {
8583 mptsas_cmd_t *cmd;
8584
8585 ASSERT(mutex_owned(&mpt->m_mutex));
8586 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
8587
8588 /*
8589 * A Bus Reset could occur at any time and flush the tx_waitq,
8590 * so we cannot count on the tx_waitq to contain even one cmd.
8591 * And when the m_tx_waitq_mutex is released and run
8592 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8593 */
8594 cmd = mpt->m_tx_waitq;
8595 for (;;) {
8596 if ((cmd = mpt->m_tx_waitq) == NULL) {
8597 mpt->m_tx_draining = 0;
8598 break;
8599 }
8600 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8601 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8602 }
8603 cmd->cmd_linkp = NULL;
8604 mutex_exit(&mpt->m_tx_waitq_mutex);
8605 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8606 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8607 "to accept cmd on queue\n");
8608 mutex_enter(&mpt->m_tx_waitq_mutex);
8609 }
8610 }
8611
8612
8613 /*
8614 * mpt tag type lookup
8615 */
/*
 * Indexed by (cmd_pkt_flags & FLAG_TAGMASK) >> 12 in mptsas_start_cmd();
 * zero entries correspond to flag combinations with no tag message.
 */
static char mptsas_tag_lookup[] =
	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8618
/*
 * Build the MPI2 SCSI IO request frame for a command that already owns
 * slot cmd->cmd_slot (used directly as the SMID), copy in the CDB, set
 * up the auto-request-sense buffer and the SGL, then post the request
 * descriptor to the IOC and insert the command into the target's
 * expiration-ordered active command queue for timeout tracking.
 *
 * Returns DDI_SUCCESS on successful submission, or DDI_FAILURE when the
 * command was re-queued to drain an untagged/tagged mix, or when a
 * post-submission FMA handle check failed.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	caddr_t			mem, arsbuf;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint8_t			ars_size;
	uint64_t		request_desc;
	uint32_t		ars_dmaaddrlow;
	mptsas_cmd_t		*c;

	NDBG1(("mptsas_start_cmd: cmd=0x%p, flags 0x%x", (void *)cmd,
	    cmd->cmd_flags));

	/*
	 * The slot number assigned earlier doubles as the SMID; 0 is an
	 * invalid SMID, so slot numbering starts at 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/* Re-queue at the head to retry after draining. */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		/* Untagged (except RQSENSE): run one command at a time. */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;
	if (cmd->cmd_extrqslen != 0) {
		/*
		 * Mapping of the buffer was done in mptsas_pkt_alloc_extern().
		 * Calculate the DMA address with the same offset.
		 */
		arsbuf = cmd->cmd_arq_buf;
		ars_size = cmd->cmd_extrqslen;
		ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
		    ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
		    0xffffffffu;
	} else {
		/* Use the per-slot slice of the shared sense buffer. */
		arsbuf = mpt->m_req_sense + (mpt->m_req_sense_size * (SMID-1));
		cmd->cmd_arq_buf = arsbuf;
		ars_size = mpt->m_req_sense_size;
		ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
		    (mpt->m_req_sense_size * (SMID-1))) &
		    0xffffffffu;
	}
	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	bzero(arsbuf, ars_size);

	/* SGLOffset0 is expressed in 32-bit words, hence the / 4. */
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	/* Use the MPI 2.5 fast path when enabled for this device. */
	if (mptsas_use_fastpath &&
	    ptgt->m_io_flags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
		io_flags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		request_desc = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	} else {
		request_desc = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	}
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* No data phase: emit a single zero-length simple element. */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, ars_size);
	ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress, ars_dmaaddrlow);

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=%d(0x%p), with cmd=0x%p",
	    SMID, (void *)io_request, (void *)cmd));

	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 * Layout, per the code below: SMID in bits 16..31, DevHandle in bits
	 * 48..63, descriptor-type flags in the low bits.
	 * NOTE(review): SMID << 16 promotes through int; cast to uint64_t if
	 * SMID could ever exceed 0x7fff — confirm against m_max_requests.
	 */
	request_desc |= (SMID << 16);
	request_desc |= (uint64_t)ptgt->m_devhdl << 48;
	MPTSAS_START_CMD(mpt, request_desc);

	/*
	 * Start timeout.
	 */
	cmd->cmd_active_expiration =
	    gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
#ifdef MPTSAS_TEST
	/*
	 * Force timeouts to happen immediately.
	 */
	if (mptsas_test_timeouts)
		cmd->cmd_active_expiration = gethrtime();
#endif
	c = TAILQ_FIRST(&ptgt->m_active_cmdq);
	if (c == NULL ||
	    c->cmd_active_expiration < cmd->cmd_active_expiration) {
		/*
		 * Common case is that this is the last pending expiration
		 * (or queue is empty).  Insert at head of the queue.
		 */
		TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
	} else {
		/*
		 * Queue is not empty and first element expires later than
		 * this command.  Search for element expiring sooner.
		 */
		while ((c = TAILQ_NEXT(c, cmd_active_link)) != NULL) {
			if (c->cmd_active_expiration <
			    cmd->cmd_active_expiration) {
				TAILQ_INSERT_BEFORE(c, cmd, cmd_active_link);
				break;
			}
		}
		if (c == NULL) {
			/*
			 * No element found expiring sooner, append to
			 * non-empty queue.
			 */
			TAILQ_INSERT_TAIL(&ptgt->m_active_cmdq, cmd,
			    cmd_active_link);
		}
	}

	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
8831
8832 /*
8833 * Select a helper thread to handle current doneq
8834 */
8835 static void
8836 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8837 {
8838 uint64_t t, i;
8839 uint32_t min = 0xffffffff;
8840 mptsas_doneq_thread_list_t *item;
8841
8842 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8843 item = &mpt->m_doneq_thread_id[i];
8844 /*
8845 * If the completed command on help thread[i] less than
8846 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8847 * pick a thread which has least completed command.
8848 */
8849
8850 mutex_enter(&item->mutex);
8851 if (item->len < mpt->m_doneq_thread_threshold) {
8852 t = i;
8853 mutex_exit(&item->mutex);
8854 break;
8855 }
8856 if (item->len < min) {
8857 min = item->len;
8858 t = i;
8859 }
8860 mutex_exit(&item->mutex);
8861 }
8862 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8863 mptsas_doneq_mv(mpt, t);
8864 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8865 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8866 }
8867
8868 /*
8869 * move the current global doneq to the doneq of thead[t]
8870 */
8871 static void
8872 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8873 {
8874 mptsas_cmd_t *cmd;
8875 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8876
8877 ASSERT(mutex_owned(&item->mutex));
8878 while ((cmd = mpt->m_doneq) != NULL) {
8879 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8880 mpt->m_donetail = &mpt->m_doneq;
8881 }
8882 cmd->cmd_linkp = NULL;
8883 *item->donetail = cmd;
8884 item->donetail = &cmd->cmd_linkp;
8885 mpt->m_doneq_len--;
8886 item->len++;
8887 }
8888 }
8889
8890 void
8891 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8892 {
8893 struct scsi_pkt *pkt = CMD2PKT(cmd);
8894
8895 /* Check all acc and dma handles */
8896 if ((mptsas_check_acc_handle(mpt->m_datap) !=
8897 DDI_SUCCESS) ||
8898 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8899 DDI_SUCCESS) ||
8900 (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
8901 DDI_SUCCESS) ||
8902 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8903 DDI_SUCCESS) ||
8904 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8905 DDI_SUCCESS) ||
8906 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8907 DDI_SUCCESS) ||
8908 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8909 DDI_SUCCESS) ||
8910 (mptsas_check_acc_handle(mpt->m_config_handle) !=
8911 DDI_SUCCESS)) {
8912 ddi_fm_service_impact(mpt->m_dip,
8913 DDI_SERVICE_UNAFFECTED);
8914 ddi_fm_acc_err_clear(mpt->m_config_handle,
8915 DDI_FME_VER0);
8916 pkt->pkt_reason = CMD_TRAN_ERR;
8917 pkt->pkt_statistics = 0;
8918 }
8919 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
8920 DDI_SUCCESS) ||
8921 (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
8922 DDI_SUCCESS) ||
8923 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
8924 DDI_SUCCESS) ||
8925 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
8926 DDI_SUCCESS) ||
8927 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
8928 DDI_SUCCESS) ||
8929 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
8930 DDI_SUCCESS)) {
8931 ddi_fm_service_impact(mpt->m_dip,
8932 DDI_SERVICE_UNAFFECTED);
8933 pkt->pkt_reason = CMD_TRAN_ERR;
8934 pkt->pkt_statistics = 0;
8935 }
8936 if (cmd->cmd_dmahandle &&
8937 (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
8938 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8939 pkt->pkt_reason = CMD_TRAN_ERR;
8940 pkt->pkt_statistics = 0;
8941 }
8942 if ((cmd->cmd_extra_frames &&
8943 ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
8944 DDI_SUCCESS) ||
8945 (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
8946 DDI_SUCCESS)))) {
8947 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8948 pkt->pkt_reason = CMD_TRAN_ERR;
8949 pkt->pkt_statistics = 0;
8950 }
8951 }
8952
8953 /*
8954 * These routines manipulate the queue of commands that
8955 * are waiting for their completion routines to be called.
8956 * The queue is usually in FIFO order but on an MP system
8957 * it's possible for the completion routines to get out
8958 * of order. If that's a problem you need to add a global
8959 * mutex around the code that calls the completion routine
8960 * in the interrupt handler.
8961 */
8962 static void
8963 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8964 {
8965 struct scsi_pkt *pkt = CMD2PKT(cmd);
8966
8967 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8968
8969 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8970 cmd->cmd_linkp = NULL;
8971 cmd->cmd_flags |= CFLAG_FINISHED;
8972 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8973
8974 mptsas_fma_check(mpt, cmd);
8975
8976 /*
8977 * only add scsi pkts that have completion routines to
8978 * the doneq. no intr cmds do not have callbacks.
8979 */
8980 if (pkt && (pkt->pkt_comp)) {
8981 *mpt->m_donetail = cmd;
8982 mpt->m_donetail = &cmd->cmd_linkp;
8983 mpt->m_doneq_len++;
8984 }
8985 }
8986
8987 static mptsas_cmd_t *
8988 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8989 {
8990 mptsas_cmd_t *cmd;
8991 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8992
8993 /* pop one off the done queue */
8994 if ((cmd = item->doneq) != NULL) {
8995 /* if the queue is now empty fix the tail pointer */
8996 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8997 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8998 item->donetail = &item->doneq;
8999 }
9000 cmd->cmd_linkp = NULL;
9001 item->len--;
9002 }
9003 return (cmd);
9004 }
9005
/*
 * Drain the adapter's done queue, invoking each completed command's
 * completion routine with m_mutex dropped.  The m_in_callback flag
 * makes this a no-op if a completion routine re-enters the driver and
 * reaches here recursively.
 */
static void
mptsas_doneq_empty(mptsas_t *mpt)
{
	if (mpt->m_doneq && !mpt->m_in_callback) {
		mptsas_cmd_t	*cmd, *next;
		struct scsi_pkt *pkt;

		mpt->m_in_callback = 1;
		/* Detach the whole list first so it can be walked unlocked. */
		cmd = mpt->m_doneq;
		mpt->m_doneq = NULL;
		mpt->m_donetail = &mpt->m_doneq;
		mpt->m_doneq_len = 0;

		mutex_exit(&mpt->m_mutex);
		/*
		 * run the completion routines of all the
		 * completed commands
		 */
		while (cmd != NULL) {
			next = cmd->cmd_linkp;
			cmd->cmd_linkp = NULL;
			/* run this command's completion routine */
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
			mptsas_pkt_comp(pkt, cmd);
			cmd = next;
		}
		mutex_enter(&mpt->m_mutex);
		mpt->m_in_callback = 0;
	}
}
9037
9038 /*
9039 * These routines manipulate the target's queue of pending requests
9040 */
/*
 * Append a command to the adapter wait queue — or push it on the front
 * when FLAG_HEAD is set (e.g. commands re-queued for draining by
 * mptsas_start_cmd()) — and bump the owning target's waiting count.
 */
void
mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
	cmd->cmd_queued = TRUE;
	if (ptgt)
		ptgt->m_t_nwait++;
	if (cmd->cmd_pkt_flags & FLAG_HEAD) {
		if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
			/* Queue was empty; the tail must track the new link. */
			mpt->m_waitqtail = &cmd->cmd_linkp;
		}
		mpt->m_waitq = cmd;
	} else {
		cmd->cmd_linkp = NULL;
		*(mpt->m_waitqtail) = cmd;
		mpt->m_waitqtail = &cmd->cmd_linkp;
	}
}
9060
/*
 * Dequeue and return the command at the head of the wait queue (NULL
 * if the queue is empty), maintaining the per-target count of waiting
 * commands.
 */
static mptsas_cmd_t *
mptsas_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt;
	NDBG7(("mptsas_waitq_rm"));

	/* MPTSAS_WAITQ_RM (defined elsewhere) unlinks the head into cmd. */
	MPTSAS_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
	if (cmd) {
		ptgt = cmd->cmd_tgt_addr;
		if (ptgt) {
			ptgt->m_t_nwait--;
			ASSERT(ptgt->m_t_nwait >= 0);
		}
	}
	return (cmd);
}
9080
9081 /*
9082 * remove specified cmd from the middle of the wait queue.
9083 */
9084 static void
9085 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
9086 {
9087 mptsas_cmd_t *prevp = mpt->m_waitq;
9088 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
9089
9090 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9091 (void *)mpt, (void *)cmd));
9092 if (ptgt) {
9093 ptgt->m_t_nwait--;
9094 ASSERT(ptgt->m_t_nwait >= 0);
9095 }
9096
9097 if (prevp == cmd) {
9098 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
9099 mpt->m_waitqtail = &mpt->m_waitq;
9100
9101 cmd->cmd_linkp = NULL;
9102 cmd->cmd_queued = FALSE;
9103 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9104 (void *)mpt, (void *)cmd));
9105 return;
9106 }
9107
9108 while (prevp != NULL) {
9109 if (prevp->cmd_linkp == cmd) {
9110 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
9111 mpt->m_waitqtail = &prevp->cmd_linkp;
9112
9113 cmd->cmd_linkp = NULL;
9114 cmd->cmd_queued = FALSE;
9115 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9116 (void *)mpt, (void *)cmd));
9117 return;
9118 }
9119 prevp = prevp->cmd_linkp;
9120 }
9121 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
9122 }
9123
/*
 * Dequeue and return the command at the head of the tx_waitq, or NULL
 * if the queue is empty.
 */
static mptsas_cmd_t *
mptsas_tx_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t *cmd;
	NDBG7(("mptsas_tx_waitq_rm"));

	/* MPTSAS_TX_WAITQ_RM (defined elsewhere) unlinks the head into cmd. */
	MPTSAS_TX_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));

	return (cmd);
}
9136
9137 /*
9138 * remove specified cmd from the middle of the tx_waitq.
9139 */
9140 static void
9141 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
9142 {
9143 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
9144
9145 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9146 (void *)mpt, (void *)cmd));
9147
9148 if (prevp == cmd) {
9149 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
9150 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
9151
9152 cmd->cmd_linkp = NULL;
9153 cmd->cmd_queued = FALSE;
9154 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9155 (void *)mpt, (void *)cmd));
9156 return;
9157 }
9158
9159 while (prevp != NULL) {
9160 if (prevp->cmd_linkp == cmd) {
9161 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
9162 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
9163
9164 cmd->cmd_linkp = NULL;
9165 cmd->cmd_queued = FALSE;
9166 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9167 (void *)mpt, (void *)cmd));
9168 return;
9169 }
9170 prevp = prevp->cmd_linkp;
9171 }
9172 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
9173 }
9174
9175 /*
9176 * device and bus reset handling
9177 *
9178 * Notes:
9179 * - RESET_ALL: reset the controller
9180 * - RESET_TARGET: reset the target specified in scsi_address
9181 */
9182 static int
9183 mptsas_scsi_reset(struct scsi_address *ap, int level)
9184 {
9185 mptsas_t *mpt = ADDR2MPT(ap);
9186 int rval;
9187 mptsas_tgt_private_t *tgt_private;
9188 mptsas_target_t *ptgt = NULL;
9189
9190 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
9191 ptgt = tgt_private->t_private;
9192 if (ptgt == NULL) {
9193 return (FALSE);
9194 }
9195 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
9196 level));
9197
9198 mutex_enter(&mpt->m_mutex);
9199 /*
9200 * if we are not in panic set up a reset delay for this target
9201 */
9202 if (!ddi_in_panic()) {
9203 mptsas_setup_bus_reset_delay(mpt);
9204 } else {
9205 drv_usecwait(mpt->m_scsi_reset_delay * 1000);
9206 }
9207 rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
9208 mutex_exit(&mpt->m_mutex);
9209
9210 /*
9211 * The transport layer expect to only see TRUE and
9212 * FALSE. Therefore, we will adjust the return value
9213 * if mptsas_do_scsi_reset returns FAILED.
9214 */
9215 if (rval == FAILED)
9216 rval = FALSE;
9217 return (rval);
9218 }
9219
9220 static int
9221 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
9222 {
9223 int rval = FALSE;
9224 uint8_t config, disk;
9225
9226 ASSERT(mutex_owned(&mpt->m_mutex));
9227
9228 if (mptsas_debug_resets) {
9229 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
9230 devhdl);
9231 }
9232
9233 /*
9234 * Issue a Target Reset message to the target specified but not to a
9235 * disk making up a raid volume. Just look through the RAID config
9236 * Phys Disk list of DevHandles. If the target's DevHandle is in this
9237 * list, then don't reset this target.
9238 */
9239 for (config = 0; config < mpt->m_num_raid_configs; config++) {
9240 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
9241 if (devhdl == mpt->m_raidconfig[config].
9242 m_physdisk_devhdl[disk]) {
9243 return (TRUE);
9244 }
9245 }
9246 }
9247
9248 rval = mptsas_ioc_task_management(mpt,
9249 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
9250
9251 mptsas_doneq_empty(mpt);
9252 return (rval);
9253 }
9254
9255 static int
9256 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
9257 void (*callback)(caddr_t), caddr_t arg)
9258 {
9259 mptsas_t *mpt = ADDR2MPT(ap);
9260
9261 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
9262
9263 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
9264 &mpt->m_mutex, &mpt->m_reset_notify_listf));
9265 }
9266
9267 static int
9268 mptsas_get_name(struct scsi_device *sd, char *name, int len)
9269 {
9270 dev_info_t *lun_dip = NULL;
9271
9272 ASSERT(sd != NULL);
9273 ASSERT(name != NULL);
9274 lun_dip = sd->sd_dev;
9275 ASSERT(lun_dip != NULL);
9276
9277 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
9278 return (1);
9279 } else {
9280 return (0);
9281 }
9282 }
9283
/*
 * Return the bus-address string for a device; mptsas uses the same
 * string as the device node name (see mptsas_get_name()).
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
9289
9290 void
9291 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
9292 {
9293
9294 NDBG25(("mptsas_set_throttle: throttle=%x", what));
9295
9296 /*
9297 * if the bus is draining/quiesced, no changes to the throttles
9298 * are allowed. Not allowing change of throttles during draining
9299 * limits error recovery but will reduce draining time
9300 *
9301 * all throttles should have been set to HOLD_THROTTLE
9302 */
9303 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
9304 return;
9305 }
9306
9307 if (what == HOLD_THROTTLE) {
9308 ptgt->m_t_throttle = HOLD_THROTTLE;
9309 } else if (ptgt->m_reset_delay == 0) {
9310 ptgt->m_t_throttle = what;
9311 }
9312 }
9313
9314 /*
9315 * Clean up from a device reset.
9316 * For the case of target reset, this function clears the waitq of all
9317 * commands for a particular target. For the case of abort task set, this
9318 * function clears the waitq of all commonds for a particular target/lun.
9319 */
static void
mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd, *next_cmd;
	int		slot;
	uchar_t		reason;
	uint_t		stat;
	hrtime_t	timestamp;

	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));

	timestamp = gethrtime();

	/*
	 * Make sure the I/O Controller has flushed all cmds
	 * that are associated with this target for a target reset
	 * and target/lun for abort task set.
	 * Account for TM requests, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;
		/* Default disposition; may be refined per task type below. */
		reason = CMD_RESET;
		stat = STAT_DEV_RESET;
		switch (tasktype) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			if (Tgt(cmd) == target) {
				if (cmd->cmd_active_expiration <= timestamp) {
					/*
					 * When timeout requested, propagate
					 * proper reason and statistics to
					 * target drivers.
					 */
					reason = CMD_TIMEOUT;
					stat |= STAT_TIMEOUT;
				}
				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
			reason = CMD_ABORTED;
			stat = STAT_ABORTED;
			/*FALLTHROUGH*/
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			/* Abort-task-set/LU-reset match on target AND lun. */
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason,
				    stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Flush the waitq and tx_waitq of this target's cmds
	 */
	cmd = mpt->m_waitq;

	reason = CMD_RESET;
	stat = STAT_DEV_RESET;

	switch (tasktype) {
	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_tx_waitq_delete(mpt, cmd);
				/*
				 * The tx_waitq mutex is dropped while the
				 * command is completed.
				 * NOTE(review): next_cmd was captured before
				 * the drop and could go stale if the queue is
				 * flushed concurrently — confirm the callers'
				 * locking makes that impossible.
				 */
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		reason = CMD_ABORTED;
		stat = STAT_ABORTED;
		/*FALLTHROUGH*/
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_tx_waitq_delete(mpt, cmd);
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
		    tasktype);
		break;
	}
}
9461
9462 /*
9463 * Clean up hba state, abort all outstanding command and commands in waitq
9464 * reset timeout of all targets.
9465 */
static void
mptsas_flush_hba(mptsas_t *mpt)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd;
	int		slot;

	NDBG25(("mptsas_flush_hba"));

	/*
	 * The I/O Controller should have already sent back
	 * all commands via the scsi I/O reply frame.  Make
	 * sure all commands have been flushed.
	 * Account for TM request, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;

		if (cmd->cmd_flags & CFLAG_CMDIOC) {
			/*
			 * Need to make sure to tell everyone that might be
			 * waiting on this command that it's going to fail.  If
			 * we get here, this command will never timeout because
			 * the active command table is going to be re-allocated,
			 * so there will be nothing to check against a time out.
			 * Instead, mark the command as failed due to reset.
			 */
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if ((cmd->cmd_flags &
			    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
				/* Wake all waiters; each re-checks its cmd. */
				cmd->cmd_flags |= CFLAG_FINISHED;
				cv_broadcast(&mpt->m_passthru_cv);
				cv_broadcast(&mpt->m_config_cv);
				cv_broadcast(&mpt->m_fw_diag_cv);
			}
			continue;
		}

		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
		    slot));
		mptsas_dump_cmd(mpt, cmd);

		mptsas_remove_cmd(mpt, cmd);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
	}

	/*
	 * Flush the waitq.
	 */
	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			/* Internal waiters are signalled, not queued. */
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
		} else {
			mptsas_doneq_add(mpt, cmd);
		}
	}

	/*
	 * Flush the tx_waitq
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
		/* Drop the tx_waitq mutex while completing the command. */
		mutex_exit(&mpt->m_tx_waitq_mutex);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
		mutex_enter(&mpt->m_tx_waitq_mutex);
	}
	mutex_exit(&mpt->m_tx_waitq_mutex);

	/*
	 * Drain the taskqs prior to reallocating resources.  The thread
	 * passing through here could be launched from either (dr)
	 * or (event) taskqs so only wait on the 'other' queue since
	 * waiting on 'this' queue is a deadlock condition.
	 */
	mutex_exit(&mpt->m_mutex);
	if (!taskq_member((taskq_t *)mpt->m_event_taskq, curthread))
		ddi_taskq_wait(mpt->m_event_taskq);
	if (!taskq_member((taskq_t *)mpt->m_dr_taskq, curthread))
		ddi_taskq_wait(mpt->m_dr_taskq);

	mutex_enter(&mpt->m_mutex);
}
9558
9559 /*
9560 * set pkt_reason and OR in pkt_statistics flag
9561 */
9562 static void
9563 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9564 uint_t stat)
9565 {
9566 #ifndef __lock_lint
9567 _NOTE(ARGUNUSED(mpt))
9568 #endif
9569
9570 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9571 (void *)cmd, reason, stat));
9572
9573 if (cmd) {
9574 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9575 cmd->cmd_pkt->pkt_reason = reason;
9576 }
9577 cmd->cmd_pkt->pkt_statistics |= stat;
9578 }
9579 }
9580
9581 static void
9582 mptsas_start_watch_reset_delay()
9583 {
9584 NDBG22(("mptsas_start_watch_reset_delay"));
9585
9586 mutex_enter(&mptsas_global_mutex);
9587 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
9588 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
9589 drv_usectohz((clock_t)
9590 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
9591 ASSERT(mptsas_reset_watch != NULL);
9592 }
9593 mutex_exit(&mptsas_global_mutex);
9594 }
9595
9596 static void
9597 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
9598 {
9599 mptsas_target_t *ptgt = NULL;
9600
9601 ASSERT(MUTEX_HELD(&mpt->m_mutex));
9602
9603 NDBG22(("mptsas_setup_bus_reset_delay"));
9604 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9605 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9606 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9607 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
9608 }
9609
9610 mptsas_start_watch_reset_delay();
9611 }
9612
9613 /*
9614 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9615 * mpt instance for active reset delays
9616 */
9617 static void
9618 mptsas_watch_reset_delay(void *arg)
9619 {
9620 #ifndef __lock_lint
9621 _NOTE(ARGUNUSED(arg))
9622 #endif
9623
9624 mptsas_t *mpt;
9625 int not_done = 0;
9626
9627 NDBG22(("mptsas_watch_reset_delay"));
9628
9629 mutex_enter(&mptsas_global_mutex);
9630 mptsas_reset_watch = 0;
9631 mutex_exit(&mptsas_global_mutex);
9632 rw_enter(&mptsas_global_rwlock, RW_READER);
9633 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9634 if (mpt->m_tran == 0) {
9635 continue;
9636 }
9637 mutex_enter(&mpt->m_mutex);
9638 not_done += mptsas_watch_reset_delay_subr(mpt);
9639 mutex_exit(&mpt->m_mutex);
9640 }
9641 rw_exit(&mptsas_global_rwlock);
9642
9643 if (not_done) {
9644 mptsas_start_watch_reset_delay();
9645 }
9646 }
9647
9648 static int
9649 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
9650 {
9651 int done = 0;
9652 int restart = 0;
9653 mptsas_target_t *ptgt = NULL;
9654
9655 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
9656
9657 ASSERT(mutex_owned(&mpt->m_mutex));
9658
9659 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9660 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9661 if (ptgt->m_reset_delay != 0) {
9662 ptgt->m_reset_delay -=
9663 MPTSAS_WATCH_RESET_DELAY_TICK;
9664 if (ptgt->m_reset_delay <= 0) {
9665 ptgt->m_reset_delay = 0;
9666 mptsas_set_throttle(mpt, ptgt,
9667 MAX_THROTTLE);
9668 restart++;
9669 } else {
9670 done = -1;
9671 }
9672 }
9673 }
9674
9675 if (restart > 0) {
9676 mptsas_restart_hba(mpt);
9677 }
9678 return (done);
9679 }
9680
#ifdef MPTSAS_TEST
/*
 * Debug hook: issue a SCSI reset against the target nominated via the
 * mptsas_rtest tunable.  On a successful reset mptsas_rtest is disarmed
 * (set to -1) so the reset fires only once.
 *
 * (The previously declared local `ptgt' was never used and has been
 * removed.)
 */
static void
mptsas_test_reset(mptsas_t *mpt, int target)
{
	if (mptsas_rtest == target) {
		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
			mptsas_rtest = -1;
		}
		if (mptsas_rtest == -1) {
			NDBG22(("mptsas_test_reset success"));
		}
	}
}
#endif
9697
9698 /*
9699 * abort handling:
9700 *
9701 * Notes:
9702 * - if pkt is not NULL, abort just that command
9703 * - if pkt is NULL, abort all outstanding commands for target
9704 */
9705 static int
9706 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9707 {
9708 mptsas_t *mpt = ADDR2MPT(ap);
9709 int rval;
9710 mptsas_tgt_private_t *tgt_private;
9711 int target, lun;
9712
9713 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9714 tran_tgt_private;
9715 ASSERT(tgt_private != NULL);
9716 target = tgt_private->t_private->m_devhdl;
9717 lun = tgt_private->t_lun;
9718
9719 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9720
9721 mutex_enter(&mpt->m_mutex);
9722 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9723 mutex_exit(&mpt->m_mutex);
9724 return (rval);
9725 }
9726
/*
 * Abort the command pkt on target/lun; if pkt is NULL abort the whole task
 * set for that target/lun.  Returns TRUE on success, FALSE otherwise.
 * Caller holds mpt->m_mutex.  Any packets completed along the way are
 * drained from the done queue before returning.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		if (sp->cmd_queued) {
			/*
			 * Still on the wait queue: it never reached the
			 * hardware, so it can be completed locally.
			 */
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Complete any packets that were moved to the done queue above. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
9806
9807 /*
9808 * capability handling:
9809 * (*tran_getcap). Get the capability named, and return its value.
9810 */
9811 static int
9812 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9813 {
9814 mptsas_t *mpt = ADDR2MPT(ap);
9815 int ckey;
9816 int rval = FALSE;
9817
9818 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9819 ap->a_target, cap, tgtonly));
9820
9821 mutex_enter(&mpt->m_mutex);
9822
9823 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9824 mutex_exit(&mpt->m_mutex);
9825 return (UNDEFINED);
9826 }
9827
9828 switch (ckey) {
9829 case SCSI_CAP_DMA_MAX:
9830 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9831 break;
9832 case SCSI_CAP_ARQ:
9833 rval = TRUE;
9834 break;
9835 case SCSI_CAP_MSG_OUT:
9836 case SCSI_CAP_PARITY:
9837 case SCSI_CAP_UNTAGGED_QING:
9838 rval = TRUE;
9839 break;
9840 case SCSI_CAP_TAGGED_QING:
9841 rval = TRUE;
9842 break;
9843 case SCSI_CAP_RESET_NOTIFICATION:
9844 rval = TRUE;
9845 break;
9846 case SCSI_CAP_LINKED_CMDS:
9847 rval = FALSE;
9848 break;
9849 case SCSI_CAP_QFULL_RETRIES:
9850 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9851 tran_tgt_private))->t_private->m_qfull_retries;
9852 break;
9853 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9854 rval = drv_hztousec(((mptsas_tgt_private_t *)
9855 (ap->a_hba_tran->tran_tgt_private))->
9856 t_private->m_qfull_retry_interval) / 1000;
9857 break;
9858 case SCSI_CAP_CDB_LEN:
9859 rval = CDB_GROUP4;
9860 break;
9861 case SCSI_CAP_INTERCONNECT_TYPE:
9862 rval = INTERCONNECT_SAS;
9863 break;
9864 case SCSI_CAP_TRAN_LAYER_RETRIES:
9865 if (mpt->m_ioc_capabilities &
9866 MPI2_IOCFACTS_CAPABILITY_TLR)
9867 rval = TRUE;
9868 else
9869 rval = FALSE;
9870 break;
9871 default:
9872 rval = UNDEFINED;
9873 break;
9874 }
9875
9876 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9877
9878 mutex_exit(&mpt->m_mutex);
9879 return (rval);
9880 }
9881
9882 /*
9883 * (*tran_setcap). Set the capability named to the value given.
9884 */
9885 static int
9886 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9887 {
9888 mptsas_t *mpt = ADDR2MPT(ap);
9889 int ckey;
9890 int rval = FALSE;
9891
9892 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9893 ap->a_target, cap, value, tgtonly));
9894
9895 if (!tgtonly) {
9896 return (rval);
9897 }
9898
9899 mutex_enter(&mpt->m_mutex);
9900
9901 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9902 mutex_exit(&mpt->m_mutex);
9903 return (UNDEFINED);
9904 }
9905
9906 switch (ckey) {
9907 case SCSI_CAP_DMA_MAX:
9908 case SCSI_CAP_MSG_OUT:
9909 case SCSI_CAP_PARITY:
9910 case SCSI_CAP_INITIATOR_ID:
9911 case SCSI_CAP_LINKED_CMDS:
9912 case SCSI_CAP_UNTAGGED_QING:
9913 case SCSI_CAP_RESET_NOTIFICATION:
9914 /*
9915 * None of these are settable via
9916 * the capability interface.
9917 */
9918 break;
9919 case SCSI_CAP_ARQ:
9920 /*
9921 * We cannot turn off arq so return false if asked to
9922 */
9923 if (value) {
9924 rval = TRUE;
9925 } else {
9926 rval = FALSE;
9927 }
9928 break;
9929 case SCSI_CAP_TAGGED_QING:
9930 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9931 (ap->a_hba_tran->tran_tgt_private))->t_private,
9932 MAX_THROTTLE);
9933 rval = TRUE;
9934 break;
9935 case SCSI_CAP_QFULL_RETRIES:
9936 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9937 t_private->m_qfull_retries = (uchar_t)value;
9938 rval = TRUE;
9939 break;
9940 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9941 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9942 t_private->m_qfull_retry_interval =
9943 drv_usectohz(value * 1000);
9944 rval = TRUE;
9945 break;
9946 default:
9947 rval = UNDEFINED;
9948 break;
9949 }
9950 mutex_exit(&mpt->m_mutex);
9951 return (rval);
9952 }
9953
9954 /*
9955 * Utility routine for mptsas_ifsetcap/ifgetcap
9956 */
9957 /*ARGSUSED*/
9958 static int
9959 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9960 {
9961 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9962
9963 if (!cap)
9964 return (FALSE);
9965
9966 *cidxp = scsi_hba_lookup_capstr(cap);
9967 return (TRUE);
9968 }
9969
9970 static int
9971 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9972 {
9973 mptsas_slots_t *old_active = mpt->m_active;
9974 mptsas_slots_t *new_active;
9975 size_t size;
9976
9977 /*
9978 * if there are active commands, then we cannot
9979 * change size of active slots array.
9980 */
9981 ASSERT(mpt->m_ncmds == 0);
9982
9983 size = MPTSAS_SLOTS_SIZE(mpt);
9984 new_active = kmem_zalloc(size, flag);
9985 if (new_active == NULL) {
9986 NDBG1(("new active alloc failed"));
9987 return (-1);
9988 }
9989 /*
9990 * Since SMID 0 is reserved and the TM slot is reserved, the
9991 * number of slots that can be used at any one time is
9992 * m_max_requests - 2.
9993 */
9994 new_active->m_n_normal = (mpt->m_max_requests - 2);
9995 new_active->m_size = size;
9996 new_active->m_rotor = 1;
9997 if (old_active)
9998 mptsas_free_active_slots(mpt);
9999 mpt->m_active = new_active;
10000
10001 return (0);
10002 }
10003
10004 static void
10005 mptsas_free_active_slots(mptsas_t *mpt)
10006 {
10007 mptsas_slots_t *active = mpt->m_active;
10008 size_t size;
10009
10010 if (active == NULL)
10011 return;
10012 size = active->m_size;
10013 kmem_free(active, size);
10014 mpt->m_active = NULL;
10015 }
10016
10017 /*
10018 * Error logging, printing, and debug print routines.
10019 */
10020 static char *mptsas_label = "mpt_sas";
10021
10022 /*PRINTFLIKE3*/
10023 void
10024 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
10025 {
10026 dev_info_t *dev;
10027 va_list ap;
10028
10029 if (mpt) {
10030 dev = mpt->m_dip;
10031 } else {
10032 dev = 0;
10033 }
10034
10035 mutex_enter(&mptsas_log_mutex);
10036
10037 va_start(ap, fmt);
10038 (void) vsprintf(mptsas_log_buf, fmt, ap);
10039 va_end(ap);
10040
10041 if (level == CE_CONT) {
10042 scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
10043 } else {
10044 scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
10045 }
10046
10047 mutex_exit(&mptsas_log_mutex);
10048 }
10049
10050 #ifdef MPTSAS_DEBUG
10051 /*
10052 * Use a circular buffer to log messages to private memory.
10053 * Increment idx atomically to minimize risk to miss lines.
10054 * It's fast and does not hold up the proceedings too much.
10055 */
10056 static const size_t mptsas_dbglog_linecnt = MPTSAS_DBGLOG_LINECNT;
10057 static const size_t mptsas_dbglog_linelen = MPTSAS_DBGLOG_LINELEN;
10058 static char mptsas_dbglog_bufs[MPTSAS_DBGLOG_LINECNT][MPTSAS_DBGLOG_LINELEN];
10059 static uint32_t mptsas_dbglog_idx = 0;
10060
10061 /*PRINTFLIKE1*/
10062 void
10063 mptsas_debug_log(char *fmt, ...)
10064 {
10065 va_list ap;
10066 uint32_t idx;
10067
10068 idx = atomic_inc_32_nv(&mptsas_dbglog_idx) &
10069 (mptsas_dbglog_linecnt - 1);
10070
10071 va_start(ap, fmt);
10072 (void) vsnprintf(mptsas_dbglog_bufs[idx],
10073 mptsas_dbglog_linelen, fmt, ap);
10074 va_end(ap);
10075 }
10076
/*
 * Debug-only unconditional printf.  Formats into the shared log buffer
 * (serialized by mptsas_log_mutex) and emits either via prom_printf()
 * or scsi_log() depending on the PROM_PRINTF build option.
 */
/*PRINTFLIKE1*/
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t	*dev = 0;
	va_list		ap;

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	/* Leading '!' directs the message to the log only, not console. */
	scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
10097 #endif
10098
10099 /*
10100 * timeout handling
10101 */
10102 static void
10103 mptsas_watch(void *arg)
10104 {
10105 #ifndef __lock_lint
10106 _NOTE(ARGUNUSED(arg))
10107 #endif
10108
10109 mptsas_t *mpt;
10110 uint32_t doorbell;
10111
10112 NDBG30(("mptsas_watch"));
10113
10114 rw_enter(&mptsas_global_rwlock, RW_READER);
10115 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
10116
10117 mutex_enter(&mpt->m_mutex);
10118
10119 /* Skip device if not powered on */
10120 if (mpt->m_options & MPTSAS_OPT_PM) {
10121 if (mpt->m_power_level == PM_LEVEL_D0) {
10122 (void) pm_busy_component(mpt->m_dip, 0);
10123 mpt->m_busy = 1;
10124 } else {
10125 mutex_exit(&mpt->m_mutex);
10126 continue;
10127 }
10128 }
10129
10130 /*
10131 * Check if controller is in a FAULT state. If so, reset it.
10132 */
10133 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
10134 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
10135 doorbell &= MPI2_DOORBELL_DATA_MASK;
10136 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
10137 "code: %04x", doorbell);
10138 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
10139 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10140 mptsas_log(mpt, CE_WARN, "Reset failed"
10141 "after fault was detected");
10142 }
10143 }
10144
10145 /*
10146 * For now, always call mptsas_watchsubr.
10147 */
10148 mptsas_watchsubr(mpt);
10149
10150 if (mpt->m_options & MPTSAS_OPT_PM) {
10151 mpt->m_busy = 0;
10152 (void) pm_idle_component(mpt->m_dip, 0);
10153 }
10154
10155 mutex_exit(&mpt->m_mutex);
10156 }
10157 rw_exit(&mptsas_global_rwlock);
10158
10159 mutex_enter(&mptsas_global_mutex);
10160 if (mptsas_timeouts_enabled)
10161 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
10162 mutex_exit(&mptsas_global_mutex);
10163 }
10164
/*
 * Per-target portion of the watchdog scan.  Restores full throttle after a
 * qfull drain completes and checks the target's active command list for
 * expired timeouts.  m_active_cmdq is kept ordered by expiration, with the
 * latest expiration at the tail.  Called with mpt->m_mutex held;
 * timestamp is gethrtime() sampled once per watchdog pass.
 */
static void
mptsas_watchsubr_tgt(mptsas_t *mpt, mptsas_target_t *ptgt, hrtime_t timestamp)
{
	mptsas_cmd_t	*cmd;

	/*
	 * If we were draining due to a qfull condition,
	 * go back to full throttle.
	 */
	if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		mptsas_restart_hba(mpt);
	}

	/* Tail holds the command with the latest expiration time. */
	cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
	if (cmd == NULL)
		return;

	if (cmd->cmd_active_expiration <= timestamp) {
		/*
		 * Earliest command timeout expired. Drain throttle.
		 */
		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

		/*
		 * Check for remaining commands.
		 */
		cmd = TAILQ_FIRST(&ptgt->m_active_cmdq);
		if (cmd->cmd_active_expiration > timestamp) {
			/*
			 * Wait for remaining commands to complete or
			 * time out.
			 */
			NDBG23(("command timed out, pending drain"));
			return;
		}

		/*
		 * All command timeouts expired.
		 */
		mptsas_log(mpt, CE_NOTE, "Timeout of %d seconds "
		    "expired with %d commands on target %d lun %d.",
		    cmd->cmd_pkt->pkt_time, ptgt->m_t_ncmds,
		    ptgt->m_devhdl, Lun(cmd));

		mptsas_cmd_timeout(mpt, ptgt);
	} else if (cmd->cmd_active_expiration <=
	    timestamp + (hrtime_t)mptsas_scsi_watchdog_tick * NANOSEC) {
		/* Timeout is imminent (within one watchdog tick). */
		NDBG23(("pending timeout"));
		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
	}
}
10219
/*
 * Per-instance watchdog scan: checks every active slot for expired
 * IOC-internal commands (passthrough/config/diag) and runs the per-target
 * timeout check over both the main and temporary target hashes.
 * Called from mptsas_watch() with mpt->m_mutex held.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;
	hrtime_t	timestamp = gethrtime();

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if (cmd->cmd_active_expiration <= timestamp) {
				if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot.  Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				} else if (cmd->cmd_flags &
				    (CFLAG_PASSTHRU | CFLAG_CONFIG |
				    CFLAG_FW_DIAG)) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					/* Wake the thread waiting on it. */
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_watchsubr_tgt(mpt, ptgt, timestamp);
	}

	/* Temporary targets (mid-enumeration) are checked as well. */
	for (ptgt = refhash_first(mpt->m_tmp_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_tmp_targets, ptgt)) {
		mptsas_watchsubr_tgt(mpt, ptgt, timestamp);
	}
}
10279
10280 /*
10281 * timeout recovery
10282 */
10283 static void
10284 mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt)
10285 {
10286 uint16_t devhdl;
10287 uint64_t sas_wwn;
10288 uint8_t phy;
10289 char wwn_str[MPTSAS_WWN_STRLEN];
10290
10291 devhdl = ptgt->m_devhdl;
10292 sas_wwn = ptgt->m_addr.mta_wwn;
10293 phy = ptgt->m_phynum;
10294 if (sas_wwn == 0) {
10295 (void) sprintf(wwn_str, "p%x", phy);
10296 } else {
10297 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
10298 }
10299
10300 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
10301 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
10302 "target %d %s, enclosure %u", devhdl, wwn_str,
10303 ptgt->m_enclosure);
10304
10305 /*
10306 * Abort all outstanding commands on the device.
10307 */
10308 NDBG29(("mptsas_cmd_timeout: device reset"));
10309 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
10310 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
10311 "recovery failed!", devhdl);
10312 }
10313 }
10314
10315 /*
10316 * Device / Hotplug control
10317 */
10318 static int
10319 mptsas_scsi_quiesce(dev_info_t *dip)
10320 {
10321 mptsas_t *mpt;
10322 scsi_hba_tran_t *tran;
10323
10324 tran = ddi_get_driver_private(dip);
10325 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10326 return (-1);
10327
10328 return (mptsas_quiesce_bus(mpt));
10329 }
10330
10331 static int
10332 mptsas_scsi_unquiesce(dev_info_t *dip)
10333 {
10334 mptsas_t *mpt;
10335 scsi_hba_tran_t *tran;
10336
10337 tran = ddi_get_driver_private(dip);
10338 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10339 return (-1);
10340
10341 return (mptsas_unquiesce_bus(mpt));
10342 }
10343
/*
 * Quiesce the bus: hold every target's throttle and wait (interruptibly)
 * for outstanding commands to drain, with mptsas_ncmds_checkdrain()
 * re-holding throttles and re-signalling on a timer.  Returns 0 once
 * quiesced, -1 if the wait was interrupted by a signal.
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		/* cv_wait_sig() returns 0 when interrupted by a signal. */
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Undo the hold and let queued I/O flow again. */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}
			mptsas_restart_hba(mpt);
			/*
			 * Cancel the drain-check timer if still pending;
			 * untimeout() must be called without m_mutex held.
			 */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
10396
10397 static int
10398 mptsas_unquiesce_bus(mptsas_t *mpt)
10399 {
10400 mptsas_target_t *ptgt = NULL;
10401
10402 NDBG28(("mptsas_unquiesce_bus"));
10403 mutex_enter(&mpt->m_mutex);
10404 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10405 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10406 ptgt = refhash_next(mpt->m_targets, ptgt)) {
10407 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10408 }
10409 mptsas_restart_hba(mpt);
10410 mutex_exit(&mpt->m_mutex);
10411 return (0);
10412 }
10413
10414 static void
10415 mptsas_ncmds_checkdrain(void *arg)
10416 {
10417 mptsas_t *mpt = arg;
10418 mptsas_target_t *ptgt = NULL;
10419
10420 mutex_enter(&mpt->m_mutex);
10421 if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
10422 mpt->m_quiesce_timeid = 0;
10423 if (mpt->m_ncmds == 0) {
10424 /* Command queue has been drained */
10425 cv_signal(&mpt->m_cv);
10426 } else {
10427 /*
10428 * The throttle may have been reset because
10429 * of a SCSI bus reset
10430 */
10431 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10432 ptgt = refhash_next(mpt->m_targets, ptgt)) {
10433 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
10434 }
10435
10436 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
10437 mpt, (MPTSAS_QUIESCE_TIMEOUT *
10438 drv_usectohz(1000000)));
10439 }
10440 }
10441 mutex_exit(&mpt->m_mutex);
10442 }
10443
10444 /*ARGSUSED*/
10445 static void
10446 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10447 {
10448 int i;
10449 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10450 char buf[128];
10451
10452 buf[0] = '\0';
10453 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10454 Tgt(cmd), Lun(cmd)));
10455 (void) sprintf(&buf[0], "\tcdb=[");
10456 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10457 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10458 }
10459 (void) sprintf(&buf[strlen(buf)], " ]");
10460 NDBG25(("?%s\n", buf));
10461 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10462 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10463 cmd->cmd_pkt->pkt_state));
10464 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10465 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10466 }
10467
10468 static void
10469 mptsas_passthru_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10470 pMpi2SGESimple64_t sgep)
10471 {
10472 uint32_t sge_flags;
10473 uint32_t data_size, dataout_size;
10474 ddi_dma_cookie_t data_cookie;
10475 ddi_dma_cookie_t dataout_cookie;
10476
10477 data_size = pt->data_size;
10478 dataout_size = pt->dataout_size;
10479 data_cookie = pt->data_cookie;
10480 dataout_cookie = pt->dataout_cookie;
10481
10482 if (dataout_size) {
10483 sge_flags = dataout_size |
10484 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10485 MPI2_SGE_FLAGS_END_OF_BUFFER |
10486 MPI2_SGE_FLAGS_HOST_TO_IOC |
10487 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10488 MPI2_SGE_FLAGS_SHIFT);
10489 ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
10490 ddi_put32(acc_hdl, &sgep->Address.Low,
10491 (uint32_t)(dataout_cookie.dmac_laddress &
10492 0xffffffffull));
10493 ddi_put32(acc_hdl, &sgep->Address.High,
10494 (uint32_t)(dataout_cookie.dmac_laddress
10495 >> 32));
10496 sgep++;
10497 }
10498 sge_flags = data_size;
10499 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10500 MPI2_SGE_FLAGS_LAST_ELEMENT |
10501 MPI2_SGE_FLAGS_END_OF_BUFFER |
10502 MPI2_SGE_FLAGS_END_OF_LIST |
10503 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10504 MPI2_SGE_FLAGS_SHIFT);
10505 if (pt->direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10506 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
10507 MPI2_SGE_FLAGS_SHIFT);
10508 } else {
10509 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
10510 MPI2_SGE_FLAGS_SHIFT);
10511 }
10512 ddi_put32(acc_hdl, &sgep->FlagsLength,
10513 sge_flags);
10514 ddi_put32(acc_hdl, &sgep->Address.Low,
10515 (uint32_t)(data_cookie.dmac_laddress &
10516 0xffffffffull));
10517 ddi_put32(acc_hdl, &sgep->Address.High,
10518 (uint32_t)(data_cookie.dmac_laddress >> 32));
10519 }
10520
/*
 * Build the MPI2.5 IEEE simple 64-bit SGE list for a passthrough request.
 * As in mptsas_passthru_sge(), an optional data-out buffer gets a leading
 * SGE; the final SGE covers the data buffer and carries the end-of-list
 * flag.  Unlike MPI2 SGEs, IEEE SGEs have no direction bits.
 */
static void
mptsas_passthru_ieee_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
    pMpi2IeeeSgeSimple64_t ieeesgep)
{
	uint8_t			sge_flags;
	uint32_t		data_size, dataout_size;
	ddi_dma_cookie_t	data_cookie;
	ddi_dma_cookie_t	dataout_cookie;

	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	sge_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
	if (dataout_size) {
		/* Leading SGE for the data-out buffer. */
		ddi_put32(acc_hdl, &ieeesgep->Length, dataout_size);
		ddi_put32(acc_hdl, &ieeesgep->Address.Low,
		    (uint32_t)(dataout_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &ieeesgep->Address.High,
		    (uint32_t)(dataout_cookie.dmac_laddress >> 32));
		ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
		ieeesgep++;
	}
	/* Final SGE: data buffer, marked end-of-list. */
	sge_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
	ddi_put32(acc_hdl, &ieeesgep->Length, data_size);
	ddi_put32(acc_hdl, &ieeesgep->Address.Low,
	    (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
	ddi_put32(acc_hdl, &ieeesgep->Address.High,
	    (uint32_t)(data_cookie.dmac_laddress >> 32));
	ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
}
10555
10556 static void
10557 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
10558 {
10559 caddr_t memp;
10560 pMPI2RequestHeader_t request_hdrp;
10561 struct scsi_pkt *pkt = cmd->cmd_pkt;
10562 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
10563 uint32_t request_size;
10564 uint32_t i;
10565 uint64_t request_desc = 0;
10566 uint8_t desc_type;
10567 uint16_t SMID;
10568 uint8_t *request, function;
10569 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
10570 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
10571
10572 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10573
10574 request = pt->request;
10575 request_size = pt->request_size;
10576
10577 SMID = cmd->cmd_slot;
10578
10579 /*
10580 * Store the passthrough message in memory location
10581 * corresponding to our slot number
10582 */
10583 memp = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
10584 request_hdrp = (pMPI2RequestHeader_t)memp;
10585 bzero(memp, mpt->m_req_frame_size);
10586
10587 for (i = 0; i < request_size; i++) {
10588 bcopy(request + i, memp + i, 1);
10589 }
10590
10591 NDBG15(("mptsas_start_passthru: Func 0x%x, MsgFlags 0x%x, "
10592 "size=%d, in %d, out %d, SMID %d", request_hdrp->Function,
10593 request_hdrp->MsgFlags, request_size,
10594 pt->data_size, pt->dataout_size, SMID));
10595
10596 /*
10597 * Add an SGE, even if the length is zero.
10598 */
10599 if (mpt->m_MPI25 && pt->simple == 0) {
10600 mptsas_passthru_ieee_sge(acc_hdl, pt,
10601 (pMpi2IeeeSgeSimple64_t)
10602 ((uint8_t *)request_hdrp + pt->sgl_offset));
10603 } else {
10604 mptsas_passthru_sge(acc_hdl, pt,
10605 (pMpi2SGESimple64_t)
10606 ((uint8_t *)request_hdrp + pt->sgl_offset));
10607 }
10608
10609 function = request_hdrp->Function;
10610 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10611 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10612 pMpi2SCSIIORequest_t scsi_io_req;
10613 caddr_t arsbuf;
10614 uint8_t ars_size;
10615 uint32_t ars_dmaaddrlow;
10616
10617 NDBG15(("mptsas_start_passthru: Is SCSI IO Req"));
10618 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
10619
10620 if (cmd->cmd_extrqslen != 0) {
10621 /*
10622 * Mapping of the buffer was done in
10623 * mptsas_do_passthru().
10624 * Calculate the DMA address with the same offset.
10625 */
10626 arsbuf = cmd->cmd_arq_buf;
10627 ars_size = cmd->cmd_extrqslen;
10628 ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10629 ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
10630 0xffffffffu;
10631 } else {
10632 arsbuf = mpt->m_req_sense +
10633 (mpt->m_req_sense_size * (SMID-1));
10634 cmd->cmd_arq_buf = arsbuf;
10635 ars_size = mpt->m_req_sense_size;
10636 ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10637 (mpt->m_req_sense_size * (SMID-1))) &
10638 0xffffffffu;
10639 }
10640 bzero(arsbuf, ars_size);
10641
10642 ddi_put8(acc_hdl, &scsi_io_req->SenseBufferLength, ars_size);
10643 ddi_put32(acc_hdl, &scsi_io_req->SenseBufferLowAddress,
10644 ars_dmaaddrlow);
10645
10646 /*
10647 * Put SGE for data and data_out buffer at the end of
10648 * scsi_io_request message header.(64 bytes in total)
10649 * Set SGLOffset0 value
10650 */
10651 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
10652 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
10653
10654 /*
10655 * Setup descriptor info. RAID passthrough must use the
10656 * default request descriptor which is already set, so if this
10657 * is a SCSI IO request, change the descriptor to SCSI IO.
10658 */
10659 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
10660 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
10661 request_desc = ((uint64_t)ddi_get16(acc_hdl,
10662 &scsi_io_req->DevHandle) << 48);
10663 }
10664 (void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
10665 DDI_DMA_SYNC_FORDEV);
10666 }
10667
10668 /*
10669 * We must wait till the message has been completed before
10670 * beginning the next message so we wait for this one to
10671 * finish.
10672 */
10673 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
10674 request_desc |= (SMID << 16) + desc_type;
10675 cmd->cmd_rfm = NULL;
10676 MPTSAS_START_CMD(mpt, request_desc);
10677 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
10678 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
10679 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10680 }
10681 }
10682
10683 typedef void (mptsas_pre_f)(mptsas_t *, mptsas_pt_request_t *);
10684 static mptsas_pre_f mpi_pre_ioc_facts;
10685 static mptsas_pre_f mpi_pre_port_facts;
10686 static mptsas_pre_f mpi_pre_fw_download;
10687 static mptsas_pre_f mpi_pre_fw_25_download;
10688 static mptsas_pre_f mpi_pre_fw_upload;
10689 static mptsas_pre_f mpi_pre_fw_25_upload;
10690 static mptsas_pre_f mpi_pre_sata_passthrough;
10691 static mptsas_pre_f mpi_pre_smp_passthrough;
10692 static mptsas_pre_f mpi_pre_config;
10693 static mptsas_pre_f mpi_pre_sas_io_unit_control;
10694 static mptsas_pre_f mpi_pre_scsi_io_req;
10695
10696 /*
10697 * Prepare the pt for a SAS2 FW_DOWNLOAD request.
10698 */
10699 static void
10700 mpi_pre_fw_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10701 {
10702 pMpi2FWDownloadTCSGE_t tcsge;
10703 pMpi2FWDownloadRequest req;
10704
10705 /*
10706 * If SAS3, call separate function.
10707 */
10708 if (mpt->m_MPI25) {
10709 mpi_pre_fw_25_download(mpt, pt);
10710 return;
10711 }
10712
10713 /*
10714 * User requests should come in with the Transaction
10715 * context element where the SGL will go. Putting the
10716 * SGL after that seems to work, but don't really know
10717 * why. Other drivers tend to create an extra SGL and
10718 * refer to the TCE through that.
10719 */
10720 req = (pMpi2FWDownloadRequest)pt->request;
10721 tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
10722 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10723 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10724 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10725 }
10726
10727 pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
10728 sizeof (*tcsge);
10729 if (pt->request_size != pt->sgl_offset) {
10730 NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
10731 "0x%x, should be 0x%x, dataoutsz 0x%x",
10732 (int)pt->request_size, (int)pt->sgl_offset,
10733 (int)pt->dataout_size));
10734 }
10735 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY)) {
10736 NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
10737 "0x%x, should be 0x%x", pt->data_size,
10738 (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10739 }
10740 }
10741
10742 /*
10743 * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10744 */
10745 static void
10746 mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10747 {
10748 pMpi2FWDownloadTCSGE_t tcsge;
10749 pMpi2FWDownloadRequest req2;
10750 pMpi25FWDownloadRequest req25;
10751
10752 /*
10753 * User requests should come in with the Transaction
10754 * context element where the SGL will go. The new firmware
10755 * Doesn't use TCE and has space in the main request for
10756 * this information. So move to the right place.
10757 */
10758 req2 = (pMpi2FWDownloadRequest)pt->request;
10759 req25 = (pMpi25FWDownloadRequest)pt->request;
10760 tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10761 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10762 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10763 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10764 }
10765 req25->ImageOffset = tcsge->ImageOffset;
10766 req25->ImageSize = tcsge->ImageSize;
10767
10768 pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10769 if (pt->request_size != pt->sgl_offset) {
10770 NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10771 "0x%x, should be 0x%x, dataoutsz 0x%x",
10772 pt->request_size, pt->sgl_offset,
10773 pt->dataout_size));
10774 }
10775 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY)) {
10776 NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10777 "0x%x, should be 0x%x", pt->data_size,
10778 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10779 }
10780 }
10781
10782 /*
10783 * Prepare the pt for a SAS2 FW_UPLOAD request.
10784 */
10785 static void
10786 mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10787 {
10788 pMpi2FWUploadTCSGE_t tcsge;
10789 pMpi2FWUploadRequest_t req;
10790
10791 /*
10792 * If SAS3, call separate function.
10793 */
10794 if (mpt->m_MPI25) {
10795 mpi_pre_fw_25_upload(mpt, pt);
10796 return;
10797 }
10798
10799 /*
10800 * User requests should come in with the Transaction
10801 * context element where the SGL will go. Putting the
10802 * SGL after that seems to work, but don't really know
10803 * why. Other drivers tend to create an extra SGL and
10804 * refer to the TCE through that.
10805 */
10806 req = (pMpi2FWUploadRequest_t)pt->request;
10807 tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
10808 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10809 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10810 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10811 }
10812
10813 pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
10814 sizeof (*tcsge);
10815 if (pt->request_size != pt->sgl_offset) {
10816 NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
10817 "0x%x, should be 0x%x, dataoutsz 0x%x",
10818 pt->request_size, pt->sgl_offset,
10819 pt->dataout_size));
10820 }
10821 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY)) {
10822 NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
10823 "0x%x, should be 0x%x", pt->data_size,
10824 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10825 }
10826 }
10827
10828 /*
10829 * Prepare the pt a SAS3 FW_UPLOAD request.
10830 */
10831 static void
10832 mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10833 {
10834 pMpi2FWUploadTCSGE_t tcsge;
10835 pMpi2FWUploadRequest_t req2;
10836 pMpi25FWUploadRequest_t req25;
10837
10838 /*
10839 * User requests should come in with the Transaction
10840 * context element where the SGL will go. The new firmware
10841 * Doesn't use TCE and has space in the main request for
10842 * this information. So move to the right place.
10843 */
10844 req2 = (pMpi2FWUploadRequest_t)pt->request;
10845 req25 = (pMpi25FWUploadRequest_t)pt->request;
10846 tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
10847 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10848 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10849 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10850 }
10851 req25->ImageOffset = tcsge->ImageOffset;
10852 req25->ImageSize = tcsge->ImageSize;
10853
10854 pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
10855 if (pt->request_size != pt->sgl_offset) {
10856 NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
10857 "0x%x, should be 0x%x, dataoutsz 0x%x",
10858 pt->request_size, pt->sgl_offset,
10859 pt->dataout_size));
10860 }
10861 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY)) {
10862 NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
10863 "0x%x, should be 0x%x", pt->data_size,
10864 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10865 }
10866 }
10867
10868 /*
10869 * Prepare the pt for an IOC_FACTS request.
10870 */
10871 static void
10872 mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10873 {
10874 #ifndef __lock_lint
10875 _NOTE(ARGUNUSED(mpt))
10876 #endif
10877 if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST)) {
10878 NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
10879 "0x%x, should be 0x%x, dataoutsz 0x%x",
10880 pt->request_size,
10881 (int)sizeof (MPI2_IOC_FACTS_REQUEST),
10882 pt->dataout_size));
10883 }
10884 if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY)) {
10885 NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
10886 "0x%x, should be 0x%x", pt->data_size,
10887 (int)sizeof (MPI2_IOC_FACTS_REPLY)));
10888 }
10889 pt->sgl_offset = (uint16_t)pt->request_size;
10890 }
10891
10892 /*
10893 * Prepare the pt for a PORT_FACTS request.
10894 */
10895 static void
10896 mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10897 {
10898 #ifndef __lock_lint
10899 _NOTE(ARGUNUSED(mpt))
10900 #endif
10901 if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST)) {
10902 NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
10903 "0x%x, should be 0x%x, dataoutsz 0x%x",
10904 pt->request_size,
10905 (int)sizeof (MPI2_PORT_FACTS_REQUEST),
10906 pt->dataout_size));
10907 }
10908 if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY)) {
10909 NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
10910 "0x%x, should be 0x%x", pt->data_size,
10911 (int)sizeof (MPI2_PORT_FACTS_REPLY)));
10912 }
10913 pt->sgl_offset = (uint16_t)pt->request_size;
10914 }
10915
10916 /*
10917 * Prepare pt for a SATA_PASSTHROUGH request.
10918 */
10919 static void
10920 mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10921 {
10922 #ifndef __lock_lint
10923 _NOTE(ARGUNUSED(mpt))
10924 #endif
10925 pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
10926 if (pt->request_size != pt->sgl_offset) {
10927 NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
10928 "0x%x, should be 0x%x, dataoutsz 0x%x",
10929 pt->request_size, pt->sgl_offset,
10930 pt->dataout_size));
10931 }
10932 if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY)) {
10933 NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
10934 "0x%x, should be 0x%x", pt->data_size,
10935 (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
10936 }
10937 }
10938
10939 static void
10940 mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10941 {
10942 #ifndef __lock_lint
10943 _NOTE(ARGUNUSED(mpt))
10944 #endif
10945 pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
10946 if (pt->request_size != pt->sgl_offset) {
10947 NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
10948 "0x%x, should be 0x%x, dataoutsz 0x%x",
10949 pt->request_size, pt->sgl_offset,
10950 pt->dataout_size));
10951 }
10952 if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY)) {
10953 NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
10954 "0x%x, should be 0x%x", pt->data_size,
10955 (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
10956 }
10957 }
10958
10959 /*
10960 * Prepare pt for a CONFIG request.
10961 */
10962 static void
10963 mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
10964 {
10965 #ifndef __lock_lint
10966 _NOTE(ARGUNUSED(mpt))
10967 #endif
10968 pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
10969 if (pt->request_size != pt->sgl_offset) {
10970 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10971 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10972 pt->sgl_offset, pt->dataout_size));
10973 }
10974 if (pt->data_size != sizeof (MPI2_CONFIG_REPLY)) {
10975 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10976 "should be 0x%x", pt->data_size,
10977 (int)sizeof (MPI2_CONFIG_REPLY)));
10978 }
10979 pt->simple = 1;
10980 }
10981
10982 /*
10983 * Prepare pt for a SCSI_IO_REQ request.
10984 */
10985 static void
10986 mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
10987 {
10988 #ifndef __lock_lint
10989 _NOTE(ARGUNUSED(mpt))
10990 #endif
10991 pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
10992 if (pt->request_size != pt->sgl_offset) {
10993 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10994 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10995 pt->sgl_offset,
10996 pt->dataout_size));
10997 }
10998 if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY)) {
10999 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
11000 "should be 0x%x", pt->data_size,
11001 (int)sizeof (MPI2_SCSI_IO_REPLY)));
11002 }
11003 }
11004
11005 /*
11006 * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
11007 */
11008 static void
11009 mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
11010 {
11011 #ifndef __lock_lint
11012 _NOTE(ARGUNUSED(mpt))
11013 #endif
11014 pt->sgl_offset = (uint16_t)pt->request_size;
11015 }
11016
11017 /*
11018 * A set of functions to prepare an mptsas_cmd for the various
11019 * supported requests.
11020 */
11021 static struct mptsas_func {
11022 U8 Function;
11023 char *Name;
11024 mptsas_pre_f *f_pre;
11025 } mptsas_func_list[] = {
11026 { MPI2_FUNCTION_IOC_FACTS, "IOC_FACTS", mpi_pre_ioc_facts },
11027 { MPI2_FUNCTION_PORT_FACTS, "PORT_FACTS", mpi_pre_port_facts },
11028 { MPI2_FUNCTION_FW_DOWNLOAD, "FW_DOWNLOAD", mpi_pre_fw_download },
11029 { MPI2_FUNCTION_FW_UPLOAD, "FW_UPLOAD", mpi_pre_fw_upload },
11030 { MPI2_FUNCTION_SATA_PASSTHROUGH, "SATA_PASSTHROUGH",
11031 mpi_pre_sata_passthrough },
11032 { MPI2_FUNCTION_SMP_PASSTHROUGH, "SMP_PASSTHROUGH",
11033 mpi_pre_smp_passthrough},
11034 { MPI2_FUNCTION_SCSI_IO_REQUEST, "SCSI_IO_REQUEST",
11035 mpi_pre_scsi_io_req},
11036 { MPI2_FUNCTION_CONFIG, "CONFIG", mpi_pre_config},
11037 { MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, "SAS_IO_UNIT_CONTROL",
11038 mpi_pre_sas_io_unit_control },
11039 { 0xFF, NULL, NULL } /* list end */
11040 };
11041
11042 static void
11043 mptsas_prep_sgl_offset(mptsas_t *mpt, mptsas_pt_request_t *pt)
11044 {
11045 pMPI2RequestHeader_t hdr;
11046 struct mptsas_func *f;
11047
11048 hdr = (pMPI2RequestHeader_t)pt->request;
11049
11050 for (f = mptsas_func_list; f->f_pre != NULL; f++) {
11051 if (hdr->Function == f->Function) {
11052 f->f_pre(mpt, pt);
11053 NDBG15(("mptsas_prep_sgl_offset: Function %s,"
11054 " sgl_offset 0x%x", f->Name,
11055 pt->sgl_offset));
11056 return;
11057 }
11058 }
11059 NDBG15(("mptsas_prep_sgl_offset: Unknown Function 0x%02x,"
11060 " returning req_size 0x%x for sgl_offset",
11061 hdr->Function, pt->request_size));
11062 pt->sgl_offset = (uint16_t)pt->request_size;
11063 }
11064
11065
/*
 * Execute a single MPI2 pass-through request on behalf of a user ioctl.
 *
 * request/reply/data/dataout are user-space addresses with the
 * corresponding *_size lengths; direction selects read/write semantics
 * and mode is the ddi_copyin/ddi_copyout flag from the ioctl.  The
 * request is copied in, DMA buffers are set up, a command slot is
 * obtained, the message is issued via mptsas_start_passthru() and this
 * thread sleeps on m_passthru_cv until the command finishes.  On
 * success the reply (plus sense data for SCSI IO) and any read data are
 * copied back out.
 *
 * Must be entered holding mpt->m_mutex; the mutex is dropped around
 * every copyin/copyout and re-acquired (noted inline).  Returns 0 or an
 * errno value; all cleanup funnels through the "out" label, driven by
 * the pt_flags resource bits.
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint32_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t pt;
	mptsas_dma_alloc_state_t data_dma_state;
	mptsas_dma_alloc_state_t dataout_dma_state;
	caddr_t memp;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;
	uint32_t reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t request_hdrp;
	pMPI2RequestHeader_t request_msg;
	pMPI2DefaultReply_t reply_msg;
	Mpi2SCSIIOReply_t rep_msg;
	int rvalue;
	int i, status = 0, pt_flags = 0, rv = 0;
	uint8_t function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Default to the zeroed stack reply; reply_msg is re-pointed at
	 * the reply frame later if the IOC returns an ADDRESS reply.
	 */
	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	NDBG27(("mptsas_do_passthru: mode 0x%x, size 0x%x, Func 0x%x",
	    mode, request_size, request_msg->Function));
	mutex_enter(&mpt->m_mutex);

	/*
	 * Task management requests are handled on a dedicated path and
	 * never go through the normal command/slot machinery.
	 */
	function = request_msg->Function;
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		pMpi2SCSITaskManagementRequest_t task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	/*
	 * Allocate a DMA buffer for "read" data (device -> user) and,
	 * for write-direction requests, pre-fill it from user space.
	 */
	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	} else {
		bzero(&data_dma_state, sizeof (data_dma_state));
	}

	/*
	 * Allocate and fill the DMA buffer for "data out" (user -> device).
	 */
	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	} else {
		bzero(&dataout_dma_state, sizeof (dataout_dma_state));
	}

	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	/*
	 * Describe the pass-through for mptsas_start_passthru(); the
	 * prep step computes where the SGL goes within the request.
	 */
	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.simple = 0;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;
	mptsas_prep_sgl_offset(mpt, &pt);

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private = (opaque_t)&pt;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = timeout;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * For SCSI IO, point the packet CDB into the request message,
	 * derive the CDB length from its group code, and size the
	 * auto-request-sense buffer from the extra reply space.
	 */
	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		uint8_t com, cdb_group_id;
		boolean_t ret;

		pkt->pkt_cdbp = ((pMpi2SCSIIORequest_t)request_msg)->CDB.CDB32;
		com = pkt->pkt_cdbp[0];
		cdb_group_id = CDB_GROUPID(com);
		switch (cdb_group_id) {
		case CDB_GROUPID_0: cmd->cmd_cdblen = CDB_GROUP0; break;
		case CDB_GROUPID_1: cmd->cmd_cdblen = CDB_GROUP1; break;
		case CDB_GROUPID_2: cmd->cmd_cdblen = CDB_GROUP2; break;
		case CDB_GROUPID_4: cmd->cmd_cdblen = CDB_GROUP4; break;
		case CDB_GROUPID_5: cmd->cmd_cdblen = CDB_GROUP5; break;
		default:
			NDBG27(("mptsas_do_passthru: SCSI_IO, reserved "
			    "CDBGROUP 0x%x requested!", cdb_group_id));
			break;
		}

		reply_len = sizeof (MPI2_SCSI_IO_REPLY);
		sense_len = reply_size - reply_len;
		ret = mptsas_cmdarqsize(mpt, cmd, sense_len, KM_SLEEP);
		VERIFY(ret == B_TRUE);
	} else {
		reply_len = reply_size;
		sense_len = 0;
	}

	NDBG27(("mptsas_do_passthru: %s, dsz 0x%x, dosz 0x%x, replen 0x%x, "
	    "snslen 0x%x",
	    (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE)?"Write":"Read",
	    data_size, dataout_size, reply_len, sense_len));

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the interrupt path marks the command finished. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	NDBG27(("mptsas_do_passthru: Cmd complete, flags 0x%x, rfm 0x%x "
	    "pktreason 0x%x", cmd->cmd_flags, cmd->cmd_rfm,
	    pkt->pkt_reason));

	/*
	 * Recompute the request frame address from the slot actually
	 * used (the waitq path may have assigned a different slot).
	 */
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
	}

	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	/*
	 * Copy the reply (and, for SCSI IO, any sense data stored 64
	 * bytes past the request header) back to the user buffer.
	 * The mutex is dropped around the copyouts.
	 */
	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		function = request_hdrp->Function;
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			sense_len = cmd->cmd_extrqslen ?
			    min(sense_len, cmd->cmd_extrqslen) :
			    min(sense_len, cmd->cmd_rqslen);
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	/* Copy read data (device -> user) out for read-direction requests. */
	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i, 1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	if (cmd) {
		/* Release the extended request-sense chunk, if one was used. */
		if (cmd->cmd_extrqslen != 0) {
			rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
			    cmd->cmd_extrqsidx + 1);
		}
		/*
		 * A prepared cmd holds a slot; mptsas_remove_cmd() both
		 * frees the slot and returns the cmd to the pool, so
		 * clear the pool flag to avoid a double return below.
		 */
		if (cmd->cmd_flags & CFLAG_PREPARED) {
			mptsas_remove_cmd(mpt, cmd);
			pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
		}
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	/* A timed-out command leaves the IOC suspect; reset it. */
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);
	NDBG27(("mptsas_do_passthru: Done status 0x%x", status));

	return (status);
}
11426
11427 static int
11428 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
11429 {
11430 /*
11431 * If timeout is 0, set timeout to default of 60 seconds.
11432 */
11433 if (data->Timeout == 0) {
11434 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
11435 }
11436
11437 if (((data->DataSize == 0) &&
11438 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
11439 ((data->DataSize != 0) &&
11440 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
11441 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
11442 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
11443 (data->DataOutSize != 0))))) {
11444 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
11445 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
11446 } else {
11447 data->DataOutSize = 0;
11448 }
11449 /*
11450 * Send passthru request messages
11451 */
11452 return (mptsas_do_passthru(mpt,
11453 (uint8_t *)((uintptr_t)data->PtrRequest),
11454 (uint8_t *)((uintptr_t)data->PtrReply),
11455 (uint8_t *)((uintptr_t)data->PtrData),
11456 data->RequestSize, data->ReplySize,
11457 data->DataSize, data->DataDirection,
11458 (uint8_t *)((uintptr_t)data->PtrDataOut),
11459 data->DataOutSize, data->Timeout, mode));
11460 } else {
11461 return (EINVAL);
11462 }
11463 }
11464
11465 static uint8_t
11466 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
11467 {
11468 uint8_t index;
11469
11470 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
11471 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
11472 return (index);
11473 }
11474 }
11475
11476 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
11477 }
11478
/*
 * Build and issue an FW diagnostic request (buffer POST or RELEASE)
 * directly into cmd's request frame and start it on the IOC.
 *
 * The diag parameters travel in pkt->pkt_ha_private (a
 * mptsas_diag_request_t set up by the caller).  The request descriptor
 * uses the DEFAULT type; completion is reported through the normal
 * reply path.  Must be called with mpt->m_mutex held.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t pDiag_post_msg;
	pMpi2DiagReleaseRequest_t pDiag_release_msg;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	mptsas_diag_request_t *diag = pkt->pkt_ha_private;
	uint32_t i;
	uint64_t request_desc;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		/* POST: describe the diag buffer (type, length, DMA addr). */
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* 64-bit DMA address split into low/high 32-bit halves. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/* RELEASE: only the function and buffer type are needed. */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	request_desc = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc);
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
11551
/*
 * Post a firmware diagnostic buffer to the IOC.
 *
 * Builds a blank IOC command/packet pair carrying an MPI2 DIAG_BUFFER_POST
 * request, starts it via mptsas_start_diag() (or queues it on the wait queue
 * if no slot is free), then sleeps on m_fw_diag_cv until the completion path
 * marks the command CFLAG_FINISHED.  On a successful reply the buffer is
 * flagged valid and owned by firmware.
 *
 * Must be called with mpt->m_mutex held (cv_wait drops/reacquires it).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; *return_code is set to one of the
 * MPTSAS_FW_DIAG_ERROR_* values for the ioctl caller.
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t		diag;
	int				status, slot_num, post_flags = 0;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	pMpi2DiagBufferPostReply_t	reply;
	uint16_t			iocstatus;
	uint32_t			iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/*
	 * The request details are handed to mptsas_start_diag() through the
	 * packet's ha_private pointer.
	 */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/*
	 * Wait for the completion path to mark the command finished.
	 * NOTE(review): no cv timeout here; this relies on the command
	 * timeout path setting CFLAG_TIMEOUT and CFLAG_FINISHED -- confirm.
	 */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		/*
		 * cmd_rfm carries the reply frame's low 32 bits of bus
		 * address; translate it to a host virtual address inside
		 * the reply frame region.
		 */
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * If the command made it into a slot, remove it; in that case the
	 * explicit return-to-pool below is skipped (presumably
	 * mptsas_remove_cmd() hands the command back itself -- confirm).
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11702
/*
 * Ask the IOC to release a firmware diagnostic buffer.
 *
 * Builds a blank IOC command/packet pair carrying an MPI2 DIAG_RELEASE
 * request, starts it via mptsas_start_diag() (or queues it if no slot is
 * free), and sleeps on m_fw_diag_cv until the completion path marks the
 * command CFLAG_FINISHED.  diag_type distinguishes a plain RELEASE from a
 * release performed as part of UNREGISTER; for the latter the buffer's
 * unique ID is invalidated on success.
 *
 * Must be called with mpt->m_mutex held (cv_wait drops/reacquires it).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; *return_code is set to one of the
 * MPTSAS_FW_DIAG_ERROR_* values for the ioctl caller.
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t	diag;
	int			status, slot_num, rel_flags = 0;
	mptsas_cmd_t		*cmd = NULL;
	struct scsi_pkt		*pkt;
	pMpi2DiagReleaseReply_t	reply;
	uint16_t		iocstatus;
	uint32_t		iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/*
	 * The request details are handed to mptsas_start_diag() through the
	 * packet's ha_private pointer.
	 */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/*
	 * Wait for the completion path to mark the command finished.
	 * NOTE(review): no cv timeout here; relies on the command timeout
	 * path setting CFLAG_TIMEOUT and CFLAG_FINISHED -- confirm.
	 */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		/*
		 * cmd_rfm carries the reply frame's low 32 bits of bus
		 * address; translate it to a host virtual address inside
		 * the reply frame region.
		 */
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.  NOTE(review): owned_by_firmware
		 * was cleared above, so a TRUE here means the completion
		 * handler re-set it for this buffer -- confirm.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * If the command made it into a slot, remove it; in that case the
	 * explicit return-to-pool below is skipped (presumably
	 * mptsas_remove_cmd() hands the command back itself -- confirm).
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11862
/*
 * Handle the FW diag REGISTER action.
 *
 * Validates the requested buffer type and unique ID, allocates a single
 * contiguous DMA buffer of the requested size, copies the caller's
 * product-specific words (trace buffers only), and posts the buffer to the
 * IOC via mptsas_post_fw_diag_buffer().  On any failure the DMA buffer is
 * freed again.
 *
 * Must be called with mpt->m_mutex held.
 *
 * Returns DDI_SUCCESS/DDI_FAILURE; *return_code carries the
 * MPTSAS_FW_DIAG_ERROR_* detail code.
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				extended_type, buffer_type, i;
	uint32_t			buffer_size;
	uint32_t			unique_id;
	int				status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	extended_type = diag_register->ExtendedType;
	buffer_type = diag_register->BufferType;
	/*
	 * NOTE(review): buffer_size comes straight from the ioctl with no
	 * range check here; presumably mptsas_dma_alloc() rejects degenerate
	 * sizes -- confirm.
	 */
	buffer_size = diag_register->RequestedBufferSize;
	unique_id = diag_register->UniqueId;

	/*
	 * Check for valid buffer type
	 */
	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
		return (DDI_FAILURE);
	}

	/*
	 * Get the current buffer and look up the unique ID.  The unique ID
	 * should not be found.  If it is, the ID is already in use.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * The buffer's unique ID should not be registered yet, and the given
	 * unique ID cannot be 0.
	 */
	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * If this buffer is already posted as immediate, just change owner.
	 * (The unique_id clause below is necessarily true at this point --
	 * the check above already returned otherwise -- so it is redundant
	 * but harmless.)
	 */
	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		pBuffer->immediate = FALSE;
		pBuffer->unique_id = unique_id;
		return (DDI_SUCCESS);
	}

	/*
	 * Post a new buffer after checking if it's enabled.  The DMA buffer
	 * that is allocated will be contiguous (sgl_len = 1).
	 */
	if (!pBuffer->enabled) {
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}
	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
	pBuffer->buffer_data.size = buffer_size;
	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
		    "diag buffer: size = %d bytes", buffer_size);
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}

	/*
	 * Copy the given info to the diag buffer and post the buffer.
	 */
	pBuffer->buffer_type = buffer_type;
	pBuffer->immediate = FALSE;
	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
		    i++) {
			pBuffer->product_specific[i] =
			    diag_register->ProductSpecific[i];
		}
	}
	pBuffer->extended_type = extended_type;
	pBuffer->unique_id = unique_id;
	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
	    DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
		    "mptsas_diag_register.");
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		status = DDI_FAILURE;
	}

	/*
	 * In case there was a failure, free the DMA buffer.
	 */
	if (status == DDI_FAILURE) {
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}
11969
11970 static int
11971 mptsas_diag_unregister(mptsas_t *mpt,
11972 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
11973 {
11974 mptsas_fw_diagnostic_buffer_t *pBuffer;
11975 uint8_t i;
11976 uint32_t unique_id;
11977 int status;
11978
11979 ASSERT(mutex_owned(&mpt->m_mutex));
11980
11981 unique_id = diag_unregister->UniqueId;
11982
11983 /*
11984 * Get the current buffer and look up the unique ID. The unique ID
11985 * should be there.
11986 */
11987 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11988 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11989 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11990 return (DDI_FAILURE);
11991 }
11992
11993 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11994
11995 /*
11996 * Try to release the buffer from FW before freeing it. If release
11997 * fails, don't free the DMA buffer in case FW tries to access it
11998 * later. If buffer is not owned by firmware, can't release it.
11999 */
12000 if (!pBuffer->owned_by_firmware) {
12001 status = DDI_SUCCESS;
12002 } else {
12003 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
12004 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
12005 }
12006
12007 /*
12008 * At this point, return the current status no matter what happens with
12009 * the DMA buffer.
12010 */
12011 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
12012 if (status == DDI_SUCCESS) {
12013 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
12014 DDI_SUCCESS) {
12015 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
12016 "in mptsas_diag_unregister.");
12017 ddi_fm_service_impact(mpt->m_dip,
12018 DDI_SERVICE_UNAFFECTED);
12019 }
12020 mptsas_dma_free(&pBuffer->buffer_data);
12021 }
12022
12023 return (status);
12024 }
12025
12026 static int
12027 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
12028 uint32_t *return_code)
12029 {
12030 mptsas_fw_diagnostic_buffer_t *pBuffer;
12031 uint8_t i;
12032 uint32_t unique_id;
12033
12034 ASSERT(mutex_owned(&mpt->m_mutex));
12035
12036 unique_id = diag_query->UniqueId;
12037
12038 /*
12039 * If ID is valid, query on ID.
12040 * If ID is invalid, query on buffer type.
12041 */
12042 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
12043 i = diag_query->BufferType;
12044 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
12045 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12046 return (DDI_FAILURE);
12047 }
12048 } else {
12049 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12050 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12051 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12052 return (DDI_FAILURE);
12053 }
12054 }
12055
12056 /*
12057 * Fill query structure with the diag buffer info.
12058 */
12059 pBuffer = &mpt->m_fw_diag_buffer_list[i];
12060 diag_query->BufferType = pBuffer->buffer_type;
12061 diag_query->ExtendedType = pBuffer->extended_type;
12062 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
12063 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
12064 i++) {
12065 diag_query->ProductSpecific[i] =
12066 pBuffer->product_specific[i];
12067 }
12068 }
12069 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
12070 diag_query->DriverAddedBufferSize = 0;
12071 diag_query->UniqueId = pBuffer->unique_id;
12072 diag_query->ApplicationFlags = 0;
12073 diag_query->DiagnosticFlags = 0;
12074
12075 /*
12076 * Set/Clear application flags
12077 */
12078 if (pBuffer->immediate) {
12079 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
12080 } else {
12081 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
12082 }
12083 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
12084 diag_query->ApplicationFlags |=
12085 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
12086 } else {
12087 diag_query->ApplicationFlags &=
12088 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
12089 }
12090 if (pBuffer->owned_by_firmware) {
12091 diag_query->ApplicationFlags |=
12092 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
12093 } else {
12094 diag_query->ApplicationFlags &=
12095 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
12096 }
12097
12098 return (DDI_SUCCESS);
12099 }
12100
12101 static int
12102 mptsas_diag_read_buffer(mptsas_t *mpt,
12103 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
12104 uint32_t *return_code, int ioctl_mode)
12105 {
12106 mptsas_fw_diagnostic_buffer_t *pBuffer;
12107 uint8_t i, *pData;
12108 uint32_t unique_id, byte;
12109 int status;
12110
12111 ASSERT(mutex_owned(&mpt->m_mutex));
12112
12113 unique_id = diag_read_buffer->UniqueId;
12114
12115 /*
12116 * Get the current buffer and look up the unique ID. The unique ID
12117 * should be there.
12118 */
12119 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12120 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12121 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12122 return (DDI_FAILURE);
12123 }
12124
12125 pBuffer = &mpt->m_fw_diag_buffer_list[i];
12126
12127 /*
12128 * Make sure requested read is within limits
12129 */
12130 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
12131 pBuffer->buffer_data.size) {
12132 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12133 return (DDI_FAILURE);
12134 }
12135
12136 /*
12137 * Copy the requested data from DMA to the diag_read_buffer. The DMA
12138 * buffer that was allocated is one contiguous buffer.
12139 */
12140 pData = (uint8_t *)(pBuffer->buffer_data.memp +
12141 diag_read_buffer->StartingOffset);
12142 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
12143 DDI_DMA_SYNC_FORCPU);
12144 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
12145 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
12146 != 0) {
12147 return (DDI_FAILURE);
12148 }
12149 }
12150 diag_read_buffer->Status = 0;
12151
12152 /*
12153 * Set or clear the Force Release flag.
12154 */
12155 if (pBuffer->force_release) {
12156 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
12157 } else {
12158 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
12159 }
12160
12161 /*
12162 * If buffer is to be reregistered, make sure it's not already owned by
12163 * firmware first.
12164 */
12165 status = DDI_SUCCESS;
12166 if (!pBuffer->owned_by_firmware) {
12167 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
12168 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
12169 return_code);
12170 }
12171 }
12172
12173 return (status);
12174 }
12175
12176 static int
12177 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
12178 uint32_t *return_code)
12179 {
12180 mptsas_fw_diagnostic_buffer_t *pBuffer;
12181 uint8_t i;
12182 uint32_t unique_id;
12183 int status;
12184
12185 ASSERT(mutex_owned(&mpt->m_mutex));
12186
12187 unique_id = diag_release->UniqueId;
12188
12189 /*
12190 * Get the current buffer and look up the unique ID. The unique ID
12191 * should be there.
12192 */
12193 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12194 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12195 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12196 return (DDI_FAILURE);
12197 }
12198
12199 pBuffer = &mpt->m_fw_diag_buffer_list[i];
12200
12201 /*
12202 * If buffer is not owned by firmware, it's already been released.
12203 */
12204 if (!pBuffer->owned_by_firmware) {
12205 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
12206 return (DDI_FAILURE);
12207 }
12208
12209 /*
12210 * Release the buffer.
12211 */
12212 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
12213 MPTSAS_FW_DIAG_TYPE_RELEASE);
12214 return (status);
12215 }
12216
12217 static int
12218 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
12219 uint32_t length, uint32_t *return_code, int ioctl_mode)
12220 {
12221 mptsas_fw_diag_register_t diag_register;
12222 mptsas_fw_diag_unregister_t diag_unregister;
12223 mptsas_fw_diag_query_t diag_query;
12224 mptsas_diag_read_buffer_t diag_read_buffer;
12225 mptsas_fw_diag_release_t diag_release;
12226 int status = DDI_SUCCESS;
12227 uint32_t original_return_code, read_buf_len;
12228
12229 ASSERT(mutex_owned(&mpt->m_mutex));
12230
12231 original_return_code = *return_code;
12232 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
12233
12234 switch (action) {
12235 case MPTSAS_FW_DIAG_TYPE_REGISTER:
12236 if (!length) {
12237 *return_code =
12238 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12239 status = DDI_FAILURE;
12240 break;
12241 }
12242 if (ddi_copyin(diag_action, &diag_register,
12243 sizeof (diag_register), ioctl_mode) != 0) {
12244 return (DDI_FAILURE);
12245 }
12246 status = mptsas_diag_register(mpt, &diag_register,
12247 return_code);
12248 break;
12249
12250 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
12251 if (length < sizeof (diag_unregister)) {
12252 *return_code =
12253 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12254 status = DDI_FAILURE;
12255 break;
12256 }
12257 if (ddi_copyin(diag_action, &diag_unregister,
12258 sizeof (diag_unregister), ioctl_mode) != 0) {
12259 return (DDI_FAILURE);
12260 }
12261 status = mptsas_diag_unregister(mpt, &diag_unregister,
12262 return_code);
12263 break;
12264
12265 case MPTSAS_FW_DIAG_TYPE_QUERY:
12266 if (length < sizeof (diag_query)) {
12267 *return_code =
12268 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12269 status = DDI_FAILURE;
12270 break;
12271 }
12272 if (ddi_copyin(diag_action, &diag_query,
12273 sizeof (diag_query), ioctl_mode) != 0) {
12274 return (DDI_FAILURE);
12275 }
12276 status = mptsas_diag_query(mpt, &diag_query,
12277 return_code);
12278 if (status == DDI_SUCCESS) {
12279 if (ddi_copyout(&diag_query, diag_action,
12280 sizeof (diag_query), ioctl_mode) != 0) {
12281 return (DDI_FAILURE);
12282 }
12283 }
12284 break;
12285
12286 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
12287 if (ddi_copyin(diag_action, &diag_read_buffer,
12288 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
12289 return (DDI_FAILURE);
12290 }
12291 read_buf_len = sizeof (diag_read_buffer) -
12292 sizeof (diag_read_buffer.DataBuffer) +
12293 diag_read_buffer.BytesToRead;
12294 if (length < read_buf_len) {
12295 *return_code =
12296 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12297 status = DDI_FAILURE;
12298 break;
12299 }
12300 status = mptsas_diag_read_buffer(mpt,
12301 &diag_read_buffer, diag_action +
12302 sizeof (diag_read_buffer) - 4, return_code,
12303 ioctl_mode);
12304 if (status == DDI_SUCCESS) {
12305 if (ddi_copyout(&diag_read_buffer, diag_action,
12306 sizeof (diag_read_buffer) - 4, ioctl_mode)
12307 != 0) {
12308 return (DDI_FAILURE);
12309 }
12310 }
12311 break;
12312
12313 case MPTSAS_FW_DIAG_TYPE_RELEASE:
12314 if (length < sizeof (diag_release)) {
12315 *return_code =
12316 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12317 status = DDI_FAILURE;
12318 break;
12319 }
12320 if (ddi_copyin(diag_action, &diag_release,
12321 sizeof (diag_release), ioctl_mode) != 0) {
12322 return (DDI_FAILURE);
12323 }
12324 status = mptsas_diag_release(mpt, &diag_release,
12325 return_code);
12326 break;
12327
12328 default:
12329 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12330 status = DDI_FAILURE;
12331 break;
12332 }
12333
12334 if ((status == DDI_FAILURE) &&
12335 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
12336 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
12337 status = DDI_SUCCESS;
12338 }
12339
12340 return (status);
12341 }
12342
12343 static int
12344 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
12345 {
12346 int status;
12347 mptsas_diag_action_t driver_data;
12348
12349 ASSERT(mutex_owned(&mpt->m_mutex));
12350
12351 /*
12352 * Copy the user data to a driver data buffer.
12353 */
12354 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
12355 mode) == 0) {
12356 /*
12357 * Send diag action request if Action is valid
12358 */
12359 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
12360 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
12361 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
12362 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
12363 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
12364 status = mptsas_do_diag_action(mpt, driver_data.Action,
12365 (void *)(uintptr_t)driver_data.PtrDiagAction,
12366 driver_data.Length, &driver_data.ReturnCode,
12367 mode);
12368 if (status == DDI_SUCCESS) {
12369 if (ddi_copyout(&driver_data.ReturnCode,
12370 &user_data->ReturnCode,
12371 sizeof (user_data->ReturnCode), mode)
12372 != 0) {
12373 status = EFAULT;
12374 } else {
12375 status = 0;
12376 }
12377 } else {
12378 status = EIO;
12379 }
12380 } else {
12381 status = EINVAL;
12382 }
12383 } else {
12384 status = EFAULT;
12385 }
12386
12387 return (status);
12388 }
12389
12390 /*
12391 * This routine handles the "event query" ioctl.
12392 */
12393 static int
12394 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
12395 int *rval)
12396 {
12397 int status;
12398 mptsas_event_query_t driverdata;
12399 uint8_t i;
12400
12401 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
12402
12403 mutex_enter(&mpt->m_mutex);
12404 for (i = 0; i < 4; i++) {
12405 driverdata.Types[i] = mpt->m_event_mask[i];
12406 }
12407 mutex_exit(&mpt->m_mutex);
12408
12409 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
12410 status = EFAULT;
12411 } else {
12412 *rval = MPTIOCTL_STATUS_GOOD;
12413 status = 0;
12414 }
12415
12416 return (status);
12417 }
12418
12419 /*
12420 * This routine handles the "event enable" ioctl.
12421 */
12422 static int
12423 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
12424 int *rval)
12425 {
12426 int status;
12427 mptsas_event_enable_t driverdata;
12428 uint8_t i;
12429
12430 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12431 mutex_enter(&mpt->m_mutex);
12432 for (i = 0; i < 4; i++) {
12433 mpt->m_event_mask[i] = driverdata.Types[i];
12434 }
12435 mutex_exit(&mpt->m_mutex);
12436
12437 *rval = MPTIOCTL_STATUS_GOOD;
12438 status = 0;
12439 } else {
12440 status = EFAULT;
12441 }
12442 return (status);
12443 }
12444
12445 /*
12446 * This routine handles the "event report" ioctl.
12447 */
12448 static int
12449 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
12450 int *rval)
12451 {
12452 int status;
12453 mptsas_event_report_t driverdata;
12454
12455 mutex_enter(&mpt->m_mutex);
12456
12457 if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
12458 mode) == 0) {
12459 if (driverdata.Size >= sizeof (mpt->m_events)) {
12460 if (ddi_copyout(mpt->m_events, data->Events,
12461 sizeof (mpt->m_events), mode) != 0) {
12462 status = EFAULT;
12463 } else {
12464 if (driverdata.Size > sizeof (mpt->m_events)) {
12465 driverdata.Size =
12466 sizeof (mpt->m_events);
12467 if (ddi_copyout(&driverdata.Size,
12468 &data->Size,
12469 sizeof (driverdata.Size),
12470 mode) != 0) {
12471 status = EFAULT;
12472 } else {
12473 *rval = MPTIOCTL_STATUS_GOOD;
12474 status = 0;
12475 }
12476 } else {
12477 *rval = MPTIOCTL_STATUS_GOOD;
12478 status = 0;
12479 }
12480 }
12481 } else {
12482 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
12483 status = 0;
12484 }
12485 } else {
12486 status = EFAULT;
12487 }
12488
12489 mutex_exit(&mpt->m_mutex);
12490 return (status);
12491 }
12492
12493 static void
12494 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12495 {
12496 int *reg_data;
12497 uint_t reglen;
12498
12499 /*
12500 * Lookup the 'reg' property and extract the other data
12501 */
12502 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12503 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
12504 DDI_PROP_SUCCESS) {
12505 /*
12506 * Extract the PCI data from the 'reg' property first DWORD.
12507 * The entry looks like the following:
12508 * First DWORD:
12509 * Bits 0 - 7 8-bit Register number
12510 * Bits 8 - 10 3-bit Function number
12511 * Bits 11 - 15 5-bit Device number
12512 * Bits 16 - 23 8-bit Bus number
12513 * Bits 24 - 25 2-bit Address Space type identifier
12514 *
12515 */
12516 adapter_data->PciInformation.u.bits.BusNumber =
12517 (reg_data[0] & 0x00FF0000) >> 16;
12518 adapter_data->PciInformation.u.bits.DeviceNumber =
12519 (reg_data[0] & 0x0000F800) >> 11;
12520 adapter_data->PciInformation.u.bits.FunctionNumber =
12521 (reg_data[0] & 0x00000700) >> 8;
12522 ddi_prop_free((void *)reg_data);
12523 } else {
12524 /*
12525 * If we can't determine the PCI data then we fill in FF's for
12526 * the data to indicate this.
12527 */
12528 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
12529 adapter_data->MpiPortNumber = 0xFFFFFFFF;
12530 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
12531 }
12532
12533 /*
12534 * Saved in the mpt->m_fwversion
12535 */
12536 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
12537 }
12538
12539 static void
12540 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12541 {
12542 char *driver_verstr = MPTSAS_MOD_STRING;
12543
12544 mptsas_lookup_pci_data(mpt, adapter_data);
12545 adapter_data->AdapterType = mpt->m_MPI25 ?
12546 MPTIOCTL_ADAPTER_TYPE_SAS3 :
12547 MPTIOCTL_ADAPTER_TYPE_SAS2;
12548 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
12549 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
12550 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
12551 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
12552 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
12553 adapter_data->BiosVersion = 0;
12554 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
12555 }
12556
12557 static void
12558 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
12559 {
12560 int *reg_data, i;
12561 uint_t reglen;
12562
12563 /*
12564 * Lookup the 'reg' property and extract the other data
12565 */
12566 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12567 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
12568 DDI_PROP_SUCCESS) {
12569 /*
12570 * Extract the PCI data from the 'reg' property first DWORD.
12571 * The entry looks like the following:
12572 * First DWORD:
12573 * Bits 8 - 10 3-bit Function number
12574 * Bits 11 - 15 5-bit Device number
12575 * Bits 16 - 23 8-bit Bus number
12576 */
12577 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
12578 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
12579 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
12580 ddi_prop_free((void *)reg_data);
12581 } else {
12582 /*
12583 * If we can't determine the PCI info then we fill in FF's for
12584 * the data to indicate this.
12585 */
12586 pci_info->BusNumber = 0xFFFFFFFF;
12587 pci_info->DeviceNumber = 0xFF;
12588 pci_info->FunctionNumber = 0xFF;
12589 }
12590
12591 /*
12592 * Now get the interrupt vector and the pci header. The vector can
12593 * only be 0 right now. The header is the first 256 bytes of config
12594 * space.
12595 */
12596 pci_info->InterruptVector = 0;
12597 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
12598 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
12599 i);
12600 }
12601 }
12602
/*
 * Handle the register-access ioctl: read or write a 32-bit adapter
 * register on behalf of the caller.  Only memory-mapped access is
 * supported; IO-space commands are rejected with EINVAL.
 *
 * NOTE(review): driverdata.RegOffset is user supplied and used as a word
 * index into the register window without a bounds check; confirm the
 * ioctl path is restricted to privileged callers or add validation.
 *
 * Returns 0 on success, or EINVAL/EFAULT.
 */
static int
mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
{
	int			status = 0;
	mptsas_reg_access_t	driverdata;

	mutex_enter(&mpt->m_mutex);
	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
		switch (driverdata.Command) {
		/*
		 * IO access is not supported.
		 */
		case REG_IO_READ:
		case REG_IO_WRITE:
			mptsas_log(mpt, CE_WARN, "IO access is not "
			    "supported. Use memory access.");
			status = EINVAL;
			break;

		case REG_MEM_READ:
			/* Read one register and copy the value out. */
			driverdata.RegData = ddi_get32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset);
			if (ddi_copyout(&driverdata.RegData,
			    &data->RegData,
			    sizeof (driverdata.RegData), mode) != 0) {
				mptsas_log(mpt, CE_WARN, "Register "
				    "Read Failed");
				status = EFAULT;
			}
			break;

		case REG_MEM_WRITE:
			/* Write the caller-supplied value to one register. */
			ddi_put32(mpt->m_datap,
			    (uint32_t *)(void *)mpt->m_reg +
			    driverdata.RegOffset,
			    driverdata.RegData);
			break;

		default:
			status = EINVAL;
			break;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
12653
12654 static int
12655 led_control(mptsas_t *mpt, intptr_t data, int mode)
12656 {
12657 int ret = 0;
12658 mptsas_led_control_t lc;
12659 mptsas_enclosure_t *mep;
12660 uint16_t slotidx;
12661
12662 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
12663 return (EFAULT);
12664 }
12665
12666 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
12667 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
12668 lc.Led < MPTSAS_LEDCTL_LED_MIN ||
12669 lc.Led > MPTSAS_LEDCTL_LED_MAX ||
12670 (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
12671 lc.LedStatus != 1)) {
12672 return (EINVAL);
12673 }
12674
12675 if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
12676 (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
12677 return (EACCES);
12678
12679 /* Locate the required enclosure */
12680 mutex_enter(&mpt->m_mutex);
12681 mep = mptsas_enc_lookup(mpt, lc.Enclosure);
12682 if (mep == NULL) {
12683 mutex_exit(&mpt->m_mutex);
12684 return (ENOENT);
12685 }
12686
12687 if (lc.Slot < mep->me_fslot) {
12688 mutex_exit(&mpt->m_mutex);
12689 return (ENOENT);
12690 }
12691
12692 /*
12693 * Slots on the enclosure are maintained in array where me_fslot is
12694 * entry zero. We normalize the requested slot.
12695 */
12696 slotidx = lc.Slot - mep->me_fslot;
12697 if (slotidx >= mep->me_nslots) {
12698 mutex_exit(&mpt->m_mutex);
12699 return (ENOENT);
12700 }
12701
12702 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
12703 /* Update our internal LED state. */
12704 mep->me_slotleds[slotidx] &= ~(1 << (lc.Led - 1));
12705 mep->me_slotleds[slotidx] |= lc.LedStatus << (lc.Led - 1);
12706
12707 /* Flush it to the controller. */
12708 ret = mptsas_flush_led_status(mpt, mep, slotidx);
12709 mutex_exit(&mpt->m_mutex);
12710 return (ret);
12711 }
12712
12713 /* Return our internal LED state. */
12714 lc.LedStatus = (mep->me_slotleds[slotidx] >> (lc.Led - 1)) & 1;
12715 mutex_exit(&mpt->m_mutex);
12716
12717 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12718 return (EFAULT);
12719 }
12720
12721 return (0);
12722 }
12723
/*
 * MPTIOCTL_GET_DISK_INFO handler: report the number of known targets and,
 * when the caller supplied a buffer, an array of per-target
 * mptsas_disk_info_t records (instance, enclosure, slot, SAS WWN).
 * The STRUCT_* macros handle the ILP32/LP64 ioctl data models.
 *
 * Returns 0, EACCES (device not opened for read), EFAULT (copy failures),
 * ENOSPC (caller's array too small), or EAGAIN (target list grew between
 * the count pass and the fill pass).
 */
static int
get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
{
	uint16_t i = 0;
	uint16_t count = 0;
	int ret = 0;
	mptsas_target_t *ptgt;
	mptsas_disk_info_t *di;
	STRUCT_DECL(mptsas_get_disk_info, gdi);

	if ((mode & FREAD) == 0)
		return (EACCES);

	STRUCT_INIT(gdi, get_udatamodel());

	if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
	    mode) != 0) {
		return (EFAULT);
	}

	/* Find out how many targets there are. */
	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		count++;
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * If we haven't been asked to copy out information on each target,
	 * then just return the count.
	 */
	STRUCT_FSET(gdi, DiskCount, count);
	if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
		goto copy_out;

	/*
	 * If we haven't been given a large enough buffer to copy out into,
	 * let the caller know.
	 */
	if (STRUCT_FGET(gdi, DiskInfoArraySize) <
	    count * sizeof (mptsas_disk_info_t)) {
		ret = ENOSPC;
		goto copy_out;
	}

	di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);

	/*
	 * Walk the target hash again to fill in the array.  The mutex was
	 * dropped after the count pass, so the set of targets may have
	 * changed in the meantime; guard the array bound below.
	 */
	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		if (i >= count) {
			/*
			 * The number of targets changed while we weren't
			 * looking, so give up.
			 */
			refhash_rele(mpt->m_targets, ptgt);
			mutex_exit(&mpt->m_mutex);
			kmem_free(di, count * sizeof (mptsas_disk_info_t));
			return (EAGAIN);
		}
		di[i].Instance = mpt->m_instance;
		di[i].Enclosure = ptgt->m_enclosure;
		di[i].Slot = ptgt->m_slot_num;
		di[i].SasAddress = ptgt->m_addr.mta_wwn;
		i++;
	}
	mutex_exit(&mpt->m_mutex);
	/* i may be less than count if targets disappeared; report i. */
	STRUCT_FSET(gdi, DiskCount, i);

	/* Copy out the disk information to the caller. */
	if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
	    i * sizeof (mptsas_disk_info_t), mode) != 0) {
		ret = EFAULT;
	}

	kmem_free(di, count * sizeof (mptsas_disk_info_t));

copy_out:
	/* Always copy the (possibly updated) header structure back out. */
	if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
	    mode) != 0) {
		ret = EFAULT;
	}

	return (ret);
}
12810
/*
 * Character-device ioctl entry point for both the controller node and its
 * iport nodes.  Requires sys_config privilege.  For iport minors the ioctl
 * is forwarded to scsi_hba_ioctl() (with target lookup bookkeeping for
 * DEVCTL online/offline); for the controller node the MPTIOCTL_* commands
 * are dispatched here and anything unrecognized falls through to
 * scsi_hba_ioctl().
 */
static int
mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
    int *rval)
{
	int status = 0;
	mptsas_t *mpt;
	mptsas_update_flash_t flashdata;
	mptsas_pass_thru_t passthru_data;
	mptsas_adapter_data_t adapter_data;
	mptsas_pci_info_t pci_info;
	int copylen;

	int iport_flag = 0;
	dev_info_t *dip = NULL;
	mptsas_phymask_t phymask = 0;
	struct devctl_iocdata *dcp = NULL;
	char *addr = NULL;
	mptsas_target_t *ptgt = NULL;

	*rval = MPTIOCTL_STATUS_GOOD;
	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
		return (EPERM);
	}

	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
	if (mpt == NULL) {
		/*
		 * Called from iport node, get the states
		 */
		iport_flag = 1;
		dip = mptsas_get_dip_from_dev(dev, &phymask);
		if (dip == NULL) {
			return (ENXIO);
		}
		mpt = DIP2MPT(dip);
	}
	/* Make sure power level is D0 before accessing registers */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_options & MPTSAS_OPT_PM) {
		/*
		 * NOTE(review): pm_busy_component() is balanced by
		 * pm_idle_component() only on the power-raise failure path
		 * below; presumably the idle side for successful ioctls is
		 * handled elsewhere in the driver -- confirm.
		 */
		(void) pm_busy_component(mpt->m_dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			mutex_exit(&mpt->m_mutex);
			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: mptsas_ioctl: Raise power "
				    "request failed.", mpt->m_instance);
				(void) pm_idle_component(mpt->m_dip, 0);
				return (ENXIO);
			}
		} else {
			mutex_exit(&mpt->m_mutex);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	if (iport_flag) {
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
		if (status != 0) {
			goto out;
		}
		/*
		 * The following code control the OK2RM LED, it doesn't affect
		 * the ioctl return status.
		 */
		if ((cmd == DEVCTL_DEVICE_ONLINE) ||
		    (cmd == DEVCTL_DEVICE_OFFLINE)) {
			if (ndi_dc_allochdl((void *)data, &dcp) !=
			    NDI_SUCCESS) {
				goto out;
			}
			addr = ndi_dc_getaddr(dcp);
			/*
			 * NOTE(review): ptgt is not used after this lookup;
			 * the lookup only serves to log when the target is
			 * missing.
			 */
			ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
			if (ptgt == NULL) {
				NDBG14(("mptsas_ioctl led control: tgt %s not "
				    "found", addr));
				ndi_dc_freehdl(dcp);
				goto out;
			}
			ndi_dc_freehdl(dcp);
		}
		goto out;
	}
	switch (cmd) {
	case MPTIOCTL_GET_DISK_INFO:
		status = get_disk_info(mpt, data, mode);
		break;
	case MPTIOCTL_LED_CONTROL:
		status = led_control(mpt, data, mode);
		break;
	case MPTIOCTL_UPDATE_FLASH:
		if (ddi_copyin((void *)data, &flashdata,
			sizeof (struct mptsas_update_flash), mode)) {
			status = EFAULT;
			break;
		}

		mutex_enter(&mpt->m_mutex);
		if (mptsas_update_flash(mpt,
		    (caddr_t)(long)flashdata.PtrBuffer,
		    flashdata.ImageSize, flashdata.ImageType, mode)) {
			status = EFAULT;
		}

		/*
		 * Reset the chip to start using the new
		 * firmware.  Reset if failed also.
		 */
		mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
		if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
			status = EFAULT;
		}
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_PASS_THRU:
		/*
		 * The user has requested to pass through a command to
		 * be executed by the MPT firmware.  Call our routine
		 * which does this.  Only allow one passthru IOCTL at
		 * one time. Other threads will block on
		 * m_passthru_mutex, which is of adaptive variant.
		 */
		if (ddi_copyin((void *)data, &passthru_data,
		    sizeof (mptsas_pass_thru_t), mode)) {
			status = EFAULT;
			break;
		}
		mutex_enter(&mpt->m_passthru_mutex);
		mutex_enter(&mpt->m_mutex);
		status = mptsas_pass_thru(mpt, &passthru_data, mode);
		mutex_exit(&mpt->m_mutex);
		mutex_exit(&mpt->m_passthru_mutex);

		break;
	case MPTIOCTL_GET_ADAPTER_DATA:
		/*
		 * The user has requested to read adapter data.  Call
		 * our routine which does this.  A short StructureLength
		 * from the caller yields only the (corrected) length and
		 * a LEN_TOO_SHORT status.
		 */
		bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
		if (ddi_copyin((void *)data, (void *)&adapter_data,
		    sizeof (mptsas_adapter_data_t), mode)) {
			status = EFAULT;
			break;
		}
		if (adapter_data.StructureLength >=
		    sizeof (mptsas_adapter_data_t)) {
			adapter_data.StructureLength = (uint32_t)
			    sizeof (mptsas_adapter_data_t);
			copylen = sizeof (mptsas_adapter_data_t);
			mutex_enter(&mpt->m_mutex);
			mptsas_read_adapter_data(mpt, &adapter_data);
			mutex_exit(&mpt->m_mutex);
		} else {
			adapter_data.StructureLength = (uint32_t)
			    sizeof (mptsas_adapter_data_t);
			copylen = sizeof (adapter_data.StructureLength);
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
		}
		if (ddi_copyout((void *)(&adapter_data), (void *)data,
		    copylen, mode) != 0) {
			status = EFAULT;
		}
		break;
	case MPTIOCTL_GET_PCI_INFO:
		/*
		 * The user has requested to read pci info.  Call
		 * our routine which does this.
		 */
		bzero(&pci_info, sizeof (mptsas_pci_info_t));
		mutex_enter(&mpt->m_mutex);
		mptsas_read_pci_info(mpt, &pci_info);
		mutex_exit(&mpt->m_mutex);
		if (ddi_copyout((void *)(&pci_info), (void *)data,
		    sizeof (mptsas_pci_info_t), mode) != 0) {
			status = EFAULT;
		}
		break;
	case MPTIOCTL_RESET_ADAPTER:
		mutex_enter(&mpt->m_mutex);
		mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
			    "failed");
			status = EFAULT;
		}
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_DIAG_ACTION:
		/*
		 * The user has done a diag buffer action.  Call our
		 * routine which does this.  Only allow one diag action
		 * at one time.  Note this returns directly, skipping the
		 * out: label (which only returns status anyway).
		 */
		mutex_enter(&mpt->m_mutex);
		if (mpt->m_diag_action_in_progress) {
			mutex_exit(&mpt->m_mutex);
			return (EBUSY);
		}
		mpt->m_diag_action_in_progress = 1;
		status = mptsas_diag_action(mpt,
		    (mptsas_diag_action_t *)data, mode);
		mpt->m_diag_action_in_progress = 0;
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_EVENT_QUERY:
		/*
		 * The user has done an event query. Call our routine
		 * which does this.
		 */
		status = mptsas_event_query(mpt,
		    (mptsas_event_query_t *)data, mode, rval);
		break;
	case MPTIOCTL_EVENT_ENABLE:
		/*
		 * The user has done an event enable. Call our routine
		 * which does this.
		 */
		status = mptsas_event_enable(mpt,
		    (mptsas_event_enable_t *)data, mode, rval);
		break;
	case MPTIOCTL_EVENT_REPORT:
		/*
		 * The user has done an event report. Call our routine
		 * which does this.
		 */
		status = mptsas_event_report(mpt,
		    (mptsas_event_report_t *)data, mode, rval);
		break;
	case MPTIOCTL_REG_ACCESS:
		/*
		 * The user has requested register access.  Call our
		 * routine which does this.
		 */
		status = mptsas_reg_access(mpt,
		    (mptsas_reg_access_t *)data, mode);
		break;
	default:
		/* Not an MPTIOCTL command; let the SCSA framework try. */
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
		    rval);
		break;
	}

out:
	return (status);
}
13058
/*
 * Reset and reinitialize the IOC, e.g. after a firmware download or at a
 * user's MPTIOCTL_RESET_ADAPTER request.  Must be called with m_mutex
 * held.  I/O is quiesced first (m_in_reset set, all target throttles put
 * on HOLD, interrupts disabled, all commands flushed), the chip is
 * reinitialized via mptsas_init_chip(), and then I/O is restarted.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE (chip reinit failed; an FM ereport
 * is posted in that case).
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int rval = DDI_SUCCESS;
	mptsas_target_t *ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Wait until all the allocated sense data buffers for DMA are freed.
	 */
	while (mpt->m_extreq_sense_refcount > 0)
		cv_wait(&mpt->m_extreq_sense_refcount_cv, &mpt->m_mutex);

	/*
	 * Set all throttles to HOLD
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.  FALSE = not first-time (attach) init.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/* Drain completions and restart queued I/O even on failure. */
	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
13143
/*
 * Bring the IOC to an operational state.  first_time distinguishes the
 * initial attach-time initialization from a reinitialization after reset.
 * When the reset path reports a message-unit reset (MPTSAS_SUCCESS_MUR)
 * outside of attach, the previously allocated frames and queues are
 * reused and setup skips ahead to the IOC init step (mur label).
 * Otherwise config space, IOC facts, slots, request/sense/reply frames
 * and the free/post queues are all (re)established before the IOC is
 * initialized, ports are enabled and event notification is turned on.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t cookie;
	uint32_t i;
	int rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
		goto fail;
	}
	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_sense_bufs(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_sense_bufs failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.  Only the low 32 bits of the frame address go on
	 * the queue; frames are m_reply_frame_size apart.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr & 0xffffffffu;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_ioc_enable_event_notification failed");
		goto fail;
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	/* Verify FM state of every DMA handle used above. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
13331
13332 static int
13333 mptsas_get_pci_cap(mptsas_t *mpt)
13334 {
13335 ushort_t caps_ptr, cap, cap_count;
13336
13337 if (mpt->m_config_handle == NULL)
13338 return (FALSE);
13339 /*
13340 * Check if capabilities list is supported and if so,
13341 * get initial capabilities pointer and clear bits 0,1.
13342 */
13343 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
13344 & PCI_STAT_CAP) {
13345 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13346 PCI_CONF_CAP_PTR), 4);
13347 } else {
13348 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
13349 }
13350
13351 /*
13352 * Walk capabilities if supported.
13353 */
13354 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
13355
13356 /*
13357 * Check that we haven't exceeded the maximum number of
13358 * capabilities and that the pointer is in a valid range.
13359 */
13360 if (++cap_count > 48) {
13361 mptsas_log(mpt, CE_WARN,
13362 "too many device capabilities.\n");
13363 break;
13364 }
13365 if (caps_ptr < 64) {
13366 mptsas_log(mpt, CE_WARN,
13367 "capabilities pointer 0x%x out of range.\n",
13368 caps_ptr);
13369 break;
13370 }
13371
13372 /*
13373 * Get next capability and check that it is valid.
13374 * For now, we only support power management.
13375 */
13376 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
13377 switch (cap) {
13378 case PCI_CAP_ID_PM:
13379 mptsas_log(mpt, CE_NOTE,
13380 "?mptsas%d supports power management.\n",
13381 mpt->m_instance);
13382 mpt->m_options |= MPTSAS_OPT_PM;
13383
13384 /* Save PMCSR offset */
13385 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
13386 break;
13387 /*
13388 * The following capabilities are valid. Any others
13389 * will cause a message to be logged.
13390 */
13391 case PCI_CAP_ID_VPD:
13392 case PCI_CAP_ID_MSI:
13393 case PCI_CAP_ID_PCIX:
13394 case PCI_CAP_ID_PCI_E:
13395 case PCI_CAP_ID_MSI_X:
13396 break;
13397 default:
13398 mptsas_log(mpt, CE_NOTE,
13399 "?mptsas%d unrecognized capability "
13400 "0x%x.\n", mpt->m_instance, cap);
13401 break;
13402 }
13403
13404 /*
13405 * Get next capabilities pointer and clear bits 0,1.
13406 */
13407 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13408 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
13409 }
13410 return (TRUE);
13411 }
13412
13413 static int
13414 mptsas_init_pm(mptsas_t *mpt)
13415 {
13416 char pmc_name[16];
13417 char *pmc[] = {
13418 NULL,
13419 "0=Off (PCI D3 State)",
13420 "3=On (PCI D0 State)",
13421 NULL
13422 };
13423 uint16_t pmcsr_stat;
13424
13425 if (mptsas_get_pci_cap(mpt) == FALSE) {
13426 return (DDI_FAILURE);
13427 }
13428 /*
13429 * If PCI's capability does not support PM, then don't need
13430 * to registe the pm-components
13431 */
13432 if (!(mpt->m_options & MPTSAS_OPT_PM))
13433 return (DDI_SUCCESS);
13434 /*
13435 * If power management is supported by this chip, create
13436 * pm-components property for the power management framework
13437 */
13438 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
13439 pmc[0] = pmc_name;
13440 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
13441 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
13442 mpt->m_options &= ~MPTSAS_OPT_PM;
13443 mptsas_log(mpt, CE_WARN,
13444 "mptsas%d: pm-component property creation failed.",
13445 mpt->m_instance);
13446 return (DDI_FAILURE);
13447 }
13448
13449 /*
13450 * Power on device.
13451 */
13452 (void) pm_busy_component(mpt->m_dip, 0);
13453 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
13454 mpt->m_pmcsr_offset);
13455 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
13456 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
13457 mpt->m_instance);
13458 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
13459 PCI_PMCSR_D0);
13460 }
13461 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
13462 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
13463 return (DDI_FAILURE);
13464 }
13465 mpt->m_power_level = PM_LEVEL_D0;
13466 /*
13467 * Set pm idle delay.
13468 */
13469 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
13470 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
13471
13472 return (DDI_SUCCESS);
13473 }
13474
13475 static int
13476 mptsas_register_intrs(mptsas_t *mpt)
13477 {
13478 dev_info_t *dip;
13479 int intr_types;
13480
13481 dip = mpt->m_dip;
13482
13483 /* Get supported interrupt types */
13484 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
13485 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
13486 "failed\n");
13487 return (FALSE);
13488 }
13489
13490 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
13491
13492 /*
13493 * Try MSI, but fall back to FIXED
13494 */
13495 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
13496 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
13497 NDBG0(("Using MSI interrupt type"));
13498 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
13499 return (TRUE);
13500 }
13501 }
13502 if (intr_types & DDI_INTR_TYPE_FIXED) {
13503 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
13504 NDBG0(("Using FIXED interrupt type"));
13505 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
13506 return (TRUE);
13507 } else {
13508 NDBG0(("FIXED interrupt registration failed"));
13509 return (FALSE);
13510 }
13511 }
13512
13513 return (FALSE);
13514 }
13515
/*
 * Tear down whatever interrupts mptsas_register_intrs() set up.
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
13521
13522 /*
13523 * mptsas_add_intrs:
13524 *
13525 * Register FIXED or MSI interrupts.
13526 */
13527 static int
13528 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
13529 {
13530 dev_info_t *dip = mpt->m_dip;
13531 int avail, actual, count = 0;
13532 int i, flag, ret;
13533
13534 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
13535
13536 /* Get number of interrupts */
13537 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
13538 if ((ret != DDI_SUCCESS) || (count <= 0)) {
13539 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
13540 "ret %d count %d\n", ret, count);
13541
13542 return (DDI_FAILURE);
13543 }
13544
13545 /* Get number of available interrupts */
13546 ret = ddi_intr_get_navail(dip, intr_type, &avail);
13547 if ((ret != DDI_SUCCESS) || (avail == 0)) {
13548 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
13549 "ret %d avail %d\n", ret, avail);
13550
13551 return (DDI_FAILURE);
13552 }
13553
13554 if (avail < count) {
13555 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
13556 "navail() returned %d", count, avail);
13557 }
13558
13559 /* Mpt only have one interrupt routine */
13560 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
13561 count = 1;
13562 }
13563
13564 /* Allocate an array of interrupt handles */
13565 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
13566 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
13567
13568 flag = DDI_INTR_ALLOC_NORMAL;
13569
13570 /* call ddi_intr_alloc() */
13571 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
13572 count, &actual, flag);
13573
13574 if ((ret != DDI_SUCCESS) || (actual == 0)) {
13575 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
13576 ret);
13577 kmem_free(mpt->m_htable, mpt->m_intr_size);
13578 return (DDI_FAILURE);
13579 }
13580
13581 /* use interrupt count returned or abort? */
13582 if (actual < count) {
13583 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
13584 count, actual);
13585 }
13586
13587 mpt->m_intr_cnt = actual;
13588
13589 /*
13590 * Get priority for first msi, assume remaining are all the same
13591 */
13592 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
13593 &mpt->m_intr_pri)) != DDI_SUCCESS) {
13594 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
13595
13596 /* Free already allocated intr */
13597 for (i = 0; i < actual; i++) {
13598 (void) ddi_intr_free(mpt->m_htable[i]);
13599 }
13600
13601 kmem_free(mpt->m_htable, mpt->m_intr_size);
13602 return (DDI_FAILURE);
13603 }
13604
13605 /* Test for high level mutex */
13606 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
13607 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
13608 "Hi level interrupt not supported\n");
13609
13610 /* Free already allocated intr */
13611 for (i = 0; i < actual; i++) {
13612 (void) ddi_intr_free(mpt->m_htable[i]);
13613 }
13614
13615 kmem_free(mpt->m_htable, mpt->m_intr_size);
13616 return (DDI_FAILURE);
13617 }
13618
13619 /* Call ddi_intr_add_handler() */
13620 for (i = 0; i < actual; i++) {
13621 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
13622 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
13623 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
13624 "failed %d\n", ret);
13625
13626 /* Free already allocated intr */
13627 for (i = 0; i < actual; i++) {
13628 (void) ddi_intr_free(mpt->m_htable[i]);
13629 }
13630
13631 kmem_free(mpt->m_htable, mpt->m_intr_size);
13632 return (DDI_FAILURE);
13633 }
13634 }
13635
13636 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
13637 != DDI_SUCCESS) {
13638 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
13639
13640 /* Free already allocated intr */
13641 for (i = 0; i < actual; i++) {
13642 (void) ddi_intr_free(mpt->m_htable[i]);
13643 }
13644
13645 kmem_free(mpt->m_htable, mpt->m_intr_size);
13646 return (DDI_FAILURE);
13647 }
13648
13649 /*
13650 * Enable interrupts
13651 */
13652 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13653 /* Call ddi_intr_block_enable() for MSI interrupts */
13654 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
13655 } else {
13656 /* Call ddi_intr_enable for MSI or FIXED interrupts */
13657 for (i = 0; i < mpt->m_intr_cnt; i++) {
13658 (void) ddi_intr_enable(mpt->m_htable[i]);
13659 }
13660 }
13661 return (DDI_SUCCESS);
13662 }
13663
13664 /*
13665 * mptsas_rem_intrs:
13666 *
13667 * Unregister FIXED or MSI interrupts
13668 */
13669 static void
13670 mptsas_rem_intrs(mptsas_t *mpt)
13671 {
13672 int i;
13673
13674 NDBG6(("mptsas_rem_intrs"));
13675
13676 /* Disable all interrupts */
13677 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13678 /* Call ddi_intr_block_disable() */
13679 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
13680 } else {
13681 for (i = 0; i < mpt->m_intr_cnt; i++) {
13682 (void) ddi_intr_disable(mpt->m_htable[i]);
13683 }
13684 }
13685
13686 /* Call ddi_intr_remove_handler() */
13687 for (i = 0; i < mpt->m_intr_cnt; i++) {
13688 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
13689 (void) ddi_intr_free(mpt->m_htable[i]);
13690 }
13691
13692 kmem_free(mpt->m_htable, mpt->m_intr_size);
13693 }
13694
13695 /*
13696 * The IO fault service error handling callback function
13697 */
/*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 * Post the error as a PCI ereport first so it is recorded.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
13709
13710 /*
13711 * mptsas_fm_init - initialize fma capabilities and register with IO
13712 * fault services.
13713 */
13714 static void
13715 mptsas_fm_init(mptsas_t *mpt)
13716 {
13717 /*
13718 * Need to change iblock to priority for new MSI intr
13719 */
13720 ddi_iblock_cookie_t fm_ibc;
13721
13722 /* Only register with IO Fault Services if we have some capability */
13723 if (mpt->m_fm_capabilities) {
13724 /* Adjust access and dma attributes for FMA */
13725 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
13726 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13727 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13728
13729 /*
13730 * Register capabilities with IO Fault Services.
13731 * mpt->m_fm_capabilities will be updated to indicate
13732 * capabilities actually supported (not requested.)
13733 */
13734 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
13735
13736 /*
13737 * Initialize pci ereport capabilities if ereport
13738 * capable (should always be.)
13739 */
13740 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13741 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13742 pci_ereport_setup(mpt->m_dip);
13743 }
13744
13745 /*
13746 * Register error callback if error callback capable.
13747 */
13748 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13749 ddi_fm_handler_register(mpt->m_dip,
13750 mptsas_fm_error_cb, (void *) mpt);
13751 }
13752 }
13753 }
13754
13755 /*
13756 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
13757 * fault services.
13758 *
13759 */
13760 static void
13761 mptsas_fm_fini(mptsas_t *mpt)
13762 {
13763 /* Only unregister FMA capabilities if registered */
13764 if (mpt->m_fm_capabilities) {
13765
13766 /*
13767 * Un-register error callback if error callback capable.
13768 */
13769
13770 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13771 ddi_fm_handler_unregister(mpt->m_dip);
13772 }
13773
13774 /*
13775 * Release any resources allocated by pci_ereport_setup()
13776 */
13777
13778 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13779 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13780 pci_ereport_teardown(mpt->m_dip);
13781 }
13782
13783 /* Unregister from IO Fault Services */
13784 ddi_fm_fini(mpt->m_dip);
13785
13786 /* Adjust access and dma attributes for FMA */
13787 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
13788 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13789 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13790
13791 }
13792 }
13793
13794 int
13795 mptsas_check_acc_handle(ddi_acc_handle_t handle)
13796 {
13797 ddi_fm_error_t de;
13798
13799 if (handle == NULL)
13800 return (DDI_FAILURE);
13801 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
13802 return (de.fme_status);
13803 }
13804
13805 int
13806 mptsas_check_dma_handle(ddi_dma_handle_t handle)
13807 {
13808 ddi_fm_error_t de;
13809
13810 if (handle == NULL)
13811 return (DDI_FAILURE);
13812 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
13813 return (de.fme_status);
13814 }
13815
13816 void
13817 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
13818 {
13819 uint64_t ena;
13820 char buf[FM_MAX_CLASS];
13821
13822 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
13823 ena = fm_ena_generate(0, FM_ENA_FMT1);
13824 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
13825 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
13826 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
13827 }
13828 }
13829
13830 static int
13831 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13832 uint16_t *dev_handle, mptsas_target_t **pptgt)
13833 {
13834 int rval;
13835 uint32_t dev_info;
13836 uint64_t sas_wwn;
13837 mptsas_phymask_t phymask;
13838 uint8_t physport, phynum, config, disk;
13839 uint64_t devicename;
13840 uint16_t pdev_hdl;
13841 mptsas_target_t *tmp_tgt = NULL;
13842 uint16_t bay_num, enclosure, io_flags;
13843
13844 ASSERT(*pptgt == NULL);
13845
13846 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13847 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13848 &bay_num, &enclosure, &io_flags);
13849 if (rval != DDI_SUCCESS) {
13850 rval = DEV_INFO_FAIL_PAGE0;
13851 return (rval);
13852 }
13853
13854 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
13855 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13856 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
13857 rval = DEV_INFO_WRONG_DEVICE_TYPE;
13858 return (rval);
13859 }
13860
13861 /*
13862 * Check if the dev handle is for a Phys Disk. If so, set return value
13863 * and exit. Don't add Phys Disks to hash.
13864 */
13865 for (config = 0; config < mpt->m_num_raid_configs; config++) {
13866 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
13867 if (*dev_handle == mpt->m_raidconfig[config].
13868 m_physdisk_devhdl[disk]) {
13869 rval = DEV_INFO_PHYS_DISK;
13870 return (rval);
13871 }
13872 }
13873 }
13874
13875 /*
13876 * Get SATA Device Name from SAS device page0 for
13877 * sata device, if device name doesn't exist, set mta_wwn to
13878 * 0 for direct attached SATA. For the device behind the expander
13879 * we still can use STP address assigned by expander.
13880 */
13881 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13882 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13883 /* alloc a temporary target to send the cmd to */
13884 tmp_tgt = mptsas_tgt_alloc(mpt->m_tmp_targets, *dev_handle,
13885 0, dev_info, 0, 0);
13886 mutex_exit(&mpt->m_mutex);
13887
13888 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13889
13890 if (devicename == -1) {
13891 mutex_enter(&mpt->m_mutex);
13892 refhash_remove(mpt->m_tmp_targets, tmp_tgt);
13893 rval = DEV_INFO_FAIL_GUID;
13894 return (rval);
13895 }
13896
13897 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13898 sas_wwn = devicename;
13899 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13900 sas_wwn = 0;
13901 }
13902
13903 mutex_enter(&mpt->m_mutex);
13904 refhash_remove(mpt->m_tmp_targets, tmp_tgt);
13905 }
13906
13907 phymask = mptsas_physport_to_phymask(mpt, physport);
13908 *pptgt = mptsas_tgt_alloc(mpt->m_targets, *dev_handle, sas_wwn,
13909 dev_info, phymask, phynum);
13910 if (*pptgt == NULL) {
13911 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
13912 "structure!");
13913 rval = DEV_INFO_FAIL_ALLOC;
13914 return (rval);
13915 }
13916 (*pptgt)->m_io_flags = io_flags;
13917 (*pptgt)->m_enclosure = enclosure;
13918 (*pptgt)->m_slot_num = bay_num;
13919 return (DEV_INFO_SUCCESS);
13920 }
13921
/*
 * Read SCSI INQUIRY VPD page 0x83 from the target and extract the
 * 64-bit NAA world-wide name of a SATA device.
 *
 * Returns the WWN on success, 0 when the descriptor carries no
 * NAA-format GUID, or (uint64_t)-1 when the INQUIRY itself fails.
 */
uint64_t
mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
{
	uint64_t sata_guid = 0, *pwwn = NULL;
	int target = ptgt->m_devhdl;
	uchar_t *inq83 = NULL;
	int inq83_len = 0xFF;
	uchar_t *dblk = NULL;
	int inq83_retry = 3;
	int rval = DDI_FAILURE;

	inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

inq83_retry:
	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
	    inq83_len, NULL, 1);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
		    "0x83 for target:%x, lun:%x failed!", target, lun);
		/* -1 wraps to all-ones in the unsigned return type */
		sata_guid = -1;
		goto out;
	}
	/* According to SAT2, the first descriptor is logic unit name */
	dblk = &inq83[4];
	if ((dblk[1] & 0x30) != 0) {
		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
		goto out;
	}
	pwwn = (uint64_t *)(void *)(&dblk[4]);
	if ((dblk[4] & 0xf0) == 0x50) {
		/* NAA identifier (high nibble 5h): byte-swap from big-endian */
		sata_guid = BE_64(*pwwn);
		goto out;
	} else if (dblk[4] == 'A') {
		/* ATA vendor-id string descriptor: no NAA GUID to return */
		NDBG20(("SATA drive has no NAA format GUID."));
		goto out;
	} else {
		/* The data is not ready, wait and retry */
		inq83_retry--;
		if (inq83_retry <= 0) {
			goto out;
		}
		NDBG20(("The GUID is not ready, retry..."));
		delay(1 * drv_usectohz(1000000));
		goto inq83_retry;
	}
out:
	kmem_free(inq83, inq83_len);
	return (sata_guid);
}
13971
13972 static int
13973 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
13974 unsigned char *buf, int len, int *reallen, uchar_t evpd)
13975 {
13976 uchar_t cdb[CDB_GROUP0];
13977 struct scsi_address ap;
13978 struct buf *data_bp = NULL;
13979 int resid = 0;
13980 int ret = DDI_FAILURE;
13981
13982 ASSERT(len <= 0xffff);
13983
13984 ap.a_target = MPTSAS_INVALID_DEVHDL;
13985 ap.a_lun = (uchar_t)(lun);
13986 ap.a_hba_tran = mpt->m_tran;
13987
13988 data_bp = scsi_alloc_consistent_buf(&ap,
13989 (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
13990 if (data_bp == NULL) {
13991 return (ret);
13992 }
13993 bzero(cdb, CDB_GROUP0);
13994 cdb[0] = SCMD_INQUIRY;
13995 cdb[1] = evpd;
13996 cdb[2] = page;
13997 cdb[3] = (len & 0xff00) >> 8;
13998 cdb[4] = (len & 0x00ff);
13999 cdb[5] = 0;
14000
14001 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
14002 &resid);
14003 if (ret == DDI_SUCCESS) {
14004 if (reallen) {
14005 *reallen = len - resid;
14006 }
14007 bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
14008 }
14009 if (data_bp) {
14010 scsi_free_consistent_buf(data_bp);
14011 }
14012 return (ret);
14013 }
14014
14015 static int
14016 mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
14017 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
14018 int *resid)
14019 {
14020 struct scsi_pkt *pktp = NULL;
14021 scsi_hba_tran_t *tran_clone = NULL;
14022 mptsas_tgt_private_t *tgt_private = NULL;
14023 int ret = DDI_FAILURE;
14024
14025 /*
14026 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
14027 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
14028 * to simulate the cmds from sd
14029 */
14030 tran_clone = kmem_alloc(
14031 sizeof (scsi_hba_tran_t), KM_SLEEP);
14032 if (tran_clone == NULL) {
14033 goto out;
14034 }
14035 bcopy((caddr_t)mpt->m_tran,
14036 (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
14037 tgt_private = kmem_alloc(
14038 sizeof (mptsas_tgt_private_t), KM_SLEEP);
14039 if (tgt_private == NULL) {
14040 goto out;
14041 }
14042 tgt_private->t_lun = ap->a_lun;
14043 tgt_private->t_private = ptgt;
14044 tran_clone->tran_tgt_private = tgt_private;
14045 ap->a_hba_tran = tran_clone;
14046
14047 pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
14048 data_bp, cdblen, sizeof (struct scsi_arq_status),
14049 0, PKT_CONSISTENT, NULL, NULL);
14050 if (pktp == NULL) {
14051 goto out;
14052 }
14053 bcopy(cdb, pktp->pkt_cdbp, cdblen);
14054 pktp->pkt_flags = FLAG_NOPARITY;
14055 if (scsi_poll(pktp) < 0) {
14056 goto out;
14057 }
14058 if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
14059 goto out;
14060 }
14061 if (resid != NULL) {
14062 *resid = pktp->pkt_resid;
14063 }
14064
14065 ret = DDI_SUCCESS;
14066 out:
14067 if (pktp) {
14068 scsi_destroy_pkt(pktp);
14069 }
14070 if (tran_clone) {
14071 kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
14072 }
14073 if (tgt_private) {
14074 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
14075 }
14076 return (ret);
14077 }
/*
 * Parse a unit-address of the form "w<WWN>,<lun>" or "p<phy>,<lun>"
 * into its components.  For the 'w' form *wwid is set; for the 'p'
 * form *phy is set.  *lun is always set on success.
 *
 * Returns DDI_SUCCESS for a well-formed address, DDI_FAILURE otherwise.
 */
static int
mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
{
	char *cp = NULL;
	char *ptr = NULL;
	size_t s = 0;
	char *wwid_str = NULL;
	char *lun_str = NULL;
	long lunnum;
	long phyid = -1;
	int rc = DDI_FAILURE;

	ptr = name;
	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
	ptr++;
	/* The wwid/phy part is everything up to the ',' separator. */
	if ((cp = strchr(ptr, ',')) == NULL) {
		return (DDI_FAILURE);
	}

	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, wwid_str, s);
	wwid_str[s] = '\0';

	ptr = ++cp;

	/*
	 * NOTE(review): strchr(ptr, '\0') always returns the terminator,
	 * so this check can never fail; kept as-is.
	 */
	if ((cp = strchr(ptr, '\0')) == NULL) {
		goto out;
	}
	lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, lun_str, s);
	lun_str[s] = '\0';

	/* 'p' addresses carry a hex phy id, 'w' addresses a WWN string. */
	if (name[0] == 'p') {
		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
	} else {
		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
	}
	if (rc != DDI_SUCCESS)
		goto out;

	if (phyid != -1) {
		/* Range check is DEBUG-only; no validation otherwise. */
		ASSERT(phyid < MPTSAS_MAX_PHYS);
		*phy = (uint8_t)phyid;
	}
	/* The lun is encoded in hex as well. */
	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
	if (rc != 0)
		goto out;

	*lun = (int)lunnum;
	rc = DDI_SUCCESS;
out:
	if (wwid_str)
		kmem_free(wwid_str, SCSI_MAXNAMELEN);
	if (lun_str)
		kmem_free(lun_str, SCSI_MAXNAMELEN);

	return (rc);
}
14140
14141 /*
14142 * mptsas_parse_smp_name() is to parse sas wwn string
14143 * which format is "wWWN"
14144 */
14145 static int
14146 mptsas_parse_smp_name(char *name, uint64_t *wwn)
14147 {
14148 char *ptr = name;
14149
14150 if (*ptr != 'w') {
14151 return (DDI_FAILURE);
14152 }
14153
14154 ptr++;
14155 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
14156 return (DDI_FAILURE);
14157 }
14158 return (DDI_SUCCESS);
14159 }
14160
/*
 * Bus-config entry point for the iport.  Handles BUS_CONFIG_ONE for
 * SMP targets ("smp@w..."), WWN-addressed ("w...") and phy-addressed
 * ("p...") devices, and BUS_CONFIG_DRIVER/BUS_CONFIG_ALL by
 * enumerating everything below the iport.  The vHCI and iport nexus
 * locks are held across the whole operation.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int ret = NDI_FAILURE;
	int circ = 0;
	int circ1 = 0;
	mptsas_t *mpt;
	char *ptr = NULL;
	char *devnm = NULL;
	uint64_t wwid = 0;
	uint8_t phy = 0xFF;
	int lun = 0;
	uint_t mflags = flag;
	int bconfig = TRUE;

	/* Only iport nodes (which carry a unit address) are configurable. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	default:
		ret = NDI_FAILURE;
		break;
	}

	/* Let the generic framework attach the node(s) we configured. */
	if ((ret == NDI_SUCCESS) && bconfig) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
14281
14282 static int
14283 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
14284 mptsas_target_t *ptgt)
14285 {
14286 int rval = DDI_FAILURE;
14287 struct scsi_inquiry *sd_inq = NULL;
14288 mptsas_t *mpt = DIP2MPT(pdip);
14289
14290 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14291
14292 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
14293 SUN_INQSIZE, 0, (uchar_t)0);
14294
14295 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14296 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
14297 } else {
14298 rval = DDI_FAILURE;
14299 }
14300
14301 kmem_free(sd_inq, SUN_INQSIZE);
14302 return (rval);
14303 }
14304
/*
 * Configure the lun at the given SAS address under the iport.  RAID
 * volumes (virtual iport, phymask 0) are routed to mptsas_config_raid();
 * everything else is probed via mptsas_probe_lun().
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int rval;
	mptsas_t *mpt = DIP2MPT(pdip);
	int phymask;
	mptsas_target_t *ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		/*
		 * NOTE(review): ptgt was found by sasaddr above, so this
		 * comparison looks like it can never be true — confirm
		 * against mptsas_wwid_to_ptgt() before relying on it.
		 */
		if (sasaddr != ptgt->m_addr.mta_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
14365
14366 static int
14367 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
14368 dev_info_t **lundip)
14369 {
14370 int rval;
14371 mptsas_t *mpt = DIP2MPT(pdip);
14372 mptsas_phymask_t phymask;
14373 mptsas_target_t *ptgt = NULL;
14374
14375 /*
14376 * Get the physical port associated to the iport
14377 */
14378 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14379 "phymask", 0);
14380
14381 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
14382 if (ptgt == NULL) {
14383 /*
14384 * didn't match any device by searching
14385 */
14386 return (DDI_FAILURE);
14387 }
14388
14389 /*
14390 * If the LUN already exists and the status is online,
14391 * we just return the pointer to dev_info_t directly.
14392 * For the mdi_pathinfo node, we'll handle it in
14393 * mptsas_create_virt_lun().
14394 */
14395
14396 *lundip = mptsas_find_child_phy(pdip, phy);
14397 if (*lundip != NULL) {
14398 return (DDI_SUCCESS);
14399 }
14400
14401 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
14402
14403 return (rval);
14404 }
14405
14406 static int
14407 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
14408 uint8_t *lun_addr_type)
14409 {
14410 uint32_t lun_idx = 0;
14411
14412 ASSERT(lun_num != NULL);
14413 ASSERT(lun_addr_type != NULL);
14414
14415 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14416 /* determine report luns addressing type */
14417 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
14418 /*
14419 * Vendors in the field have been found to be concatenating
14420 * bus/target/lun to equal the complete lun value instead
14421 * of switching to flat space addressing
14422 */
14423 /* 00b - peripheral device addressing method */
14424 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
14425 /* FALLTHRU */
14426 /* 10b - logical unit addressing method */
14427 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
14428 /* FALLTHRU */
14429 /* 01b - flat space addressing method */
14430 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
14431 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
14432 *lun_addr_type = (buf[lun_idx] &
14433 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
14434 *lun_num = (buf[lun_idx] & 0x3F) << 8;
14435 *lun_num |= buf[lun_idx + 1];
14436 return (DDI_SUCCESS);
14437 default:
14438 return (DDI_FAILURE);
14439 }
14440 }
14441
/*
 * Issue REPORT LUNS to the target and configure a child node for every
 * lun returned, then offline any previously-configured luns that are no
 * longer reported.  SATA devices (with or without a Device Name) are
 * single-lun and short-circuit early.
 */
static int
mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
{
	struct buf *repluns_bp = NULL;
	struct scsi_address ap;
	uchar_t cdb[CDB_GROUP5];
	int ret = DDI_FAILURE;
	int retry = 0;
	int lun_list_len = 0;
	uint16_t lun_num = 0;
	uint8_t lun_addr_type = 0;
	uint32_t lun_cnt = 0;
	uint32_t lun_total = 0;
	dev_info_t *cdip = NULL;
	uint16_t *saved_repluns = NULL;
	char *buffer = NULL;
	int buf_len = 128;
	mptsas_t *mpt = DIP2MPT(pdip);
	uint64_t sas_wwn = 0;
	uint8_t phy = 0xFF;
	uint32_t dev_info = 0;

	/* Snapshot the target identity under the instance lock. */
	mutex_enter(&mpt->m_mutex);
	sas_wwn = ptgt->m_addr.mta_wwn;
	phy = ptgt->m_phynum;
	dev_info = ptgt->m_deviceinfo;
	mutex_exit(&mpt->m_mutex);

	if (sas_wwn == 0) {
		/*
		 * It's a SATA without Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_phy(pdip, phy)) {
			return (DDI_SUCCESS);
		} else {
			/*
			 * need configure and create node
			 */
			return (DDI_FAILURE);
		}
	}

	/*
	 * WWN (SAS address or Device Name exist)
	 */
	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		/*
		 * SATA device with Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
			return (DDI_SUCCESS);
		} else {
			return (DDI_FAILURE);
		}
	}

	/*
	 * REPORT LUNS with a growing buffer: start at 128 bytes and, if
	 * the device reports a longer lun list, reallocate and reissue.
	 * Transient failures are retried up to 3 times.
	 */
	do {
		ap.a_target = MPTSAS_INVALID_DEVHDL;
		ap.a_lun = 0;
		ap.a_hba_tran = mpt->m_tran;
		repluns_bp = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
		if (repluns_bp == NULL) {
			retry++;
			continue;
		}
		bzero(cdb, CDB_GROUP5);
		cdb[0] = SCMD_REPORT_LUNS;
		/* 32-bit allocation length, big-endian, bytes 6-9 */
		cdb[6] = (buf_len & 0xff000000) >> 24;
		cdb[7] = (buf_len & 0x00ff0000) >> 16;
		cdb[8] = (buf_len & 0x0000ff00) >> 8;
		cdb[9] = (buf_len & 0x000000ff);

		ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
		    repluns_bp, NULL);
		if (ret != DDI_SUCCESS) {
			scsi_free_consistent_buf(repluns_bp);
			retry++;
			continue;
		}
		lun_list_len = BE_32(*(int *)((void *)(
		    repluns_bp->b_un.b_addr)));
		if (buf_len >= lun_list_len + 8) {
			ret = DDI_SUCCESS;
			break;
		}
		scsi_free_consistent_buf(repluns_bp);
		buf_len = lun_list_len + 8;

	} while (retry < 3);

	if (ret != DDI_SUCCESS)
		return (ret);
	buffer = (char *)repluns_bp->b_un.b_addr;
	/*
	 * find out the number of luns returned by the SCSI ReportLun call
	 * and allocate buffer space
	 */
	lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
	saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
	/* NOTE(review): KM_SLEEP cannot fail, so this check is dead code. */
	if (saved_repluns == NULL) {
		scsi_free_consistent_buf(repluns_bp);
		return (DDI_FAILURE);
	}
	for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
		if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
		    &lun_num, &lun_addr_type) != DDI_SUCCESS) {
			continue;
		}
		saved_repluns[lun_cnt] = lun_num;
		/* Reuse the existing child if one is already configured. */
		if ((cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num)) !=
		    NULL) {
			ret = DDI_SUCCESS;
		} else {
			ret = mptsas_probe_lun(pdip, lun_num, &cdip,
			    ptgt);
		}
		if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
			(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
			    MPTSAS_DEV_GONE);
		}
	}
	/* Anything previously configured but no longer reported goes away. */
	mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
	kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
	scsi_free_consistent_buf(repluns_bp);
	return (DDI_SUCCESS);
}
14572
14573 static int
14574 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
14575 {
14576 int rval = DDI_FAILURE;
14577 struct scsi_inquiry *sd_inq = NULL;
14578 mptsas_t *mpt = DIP2MPT(pdip);
14579 mptsas_target_t *ptgt = NULL;
14580
14581 mutex_enter(&mpt->m_mutex);
14582 ptgt = refhash_linear_search(mpt->m_targets,
14583 mptsas_target_eval_devhdl, &target);
14584 mutex_exit(&mpt->m_mutex);
14585 if (ptgt == NULL) {
14586 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
14587 "not found.", target);
14588 return (rval);
14589 }
14590
14591 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14592 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
14593 SUN_INQSIZE, 0, (uchar_t)0);
14594
14595 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14596 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
14597 0);
14598 } else {
14599 rval = DDI_FAILURE;
14600 }
14601
14602 kmem_free(sd_inq, SUN_INQSIZE);
14603 return (rval);
14604 }
14605
14606 /*
14607 * configure all RAID volumes for virtual iport
14608 */
14609 static void
14610 mptsas_config_all_viport(dev_info_t *pdip)
14611 {
14612 mptsas_t *mpt = DIP2MPT(pdip);
14613 int config, vol;
14614 int target;
14615 dev_info_t *lundip = NULL;
14616
14617 /*
14618 * Get latest RAID info and search for any Volume DevHandles. If any
14619 * are found, configure the volume.
14620 */
14621 mutex_enter(&mpt->m_mutex);
14622 for (config = 0; config < mpt->m_num_raid_configs; config++) {
14623 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
14624 if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
14625 == 1) {
14626 target = mpt->m_raidconfig[config].
14627 m_raidvol[vol].m_raidhandle;
14628 mutex_exit(&mpt->m_mutex);
14629 (void) mptsas_config_raid(pdip, target,
14630 &lundip);
14631 mutex_enter(&mpt->m_mutex);
14632 }
14633 }
14634 }
14635 mutex_exit(&mpt->m_mutex);
14636 }
14637
/*
 * Walk the children (and mdi pathinfo nodes) under the iport and
 * offline every lun that belongs to this target but does not appear in
 * the repluns list returned by the most recent REPORT LUNS.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t *child = NULL, *savechild = NULL;
	mdi_pathinfo_t *pip = NULL, *savepip = NULL;
	uint64_t sas_wwn, wwid;
	uint8_t phy;
	int lun;
	int i;
	int find;
	char *addr;
	char *nodename;
	mptsas_t *mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_addr.mta_wwn;
	mutex_exit(&mpt->m_mutex);

	/* First pass: plain devinfo children of the iport. */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		/* Grab the sibling first: savechild may be offlined below. */
		savechild = child;
		child = ddi_get_next_sibling(child);

		nodename = ddi_node_name(savechild);
		/* SMP nodes are not luns; skip them. */
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		/* Only luns belonging to this target are candidates. */
		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(savechild, NULL);
		}
	}

	/* Second pass: mdi pathinfo (multipath) nodes. */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(NULL, savepip);
		}
	}
}
14732
14733 /*
14734 * If this enclosure doesn't exist in the enclosure list, add it. If it does,
14735 * update it.
14736 */
static void
mptsas_enclosure_update(mptsas_t *mpt, mptsas_enclosure_t *mep)
{
	mptsas_enclosure_t *m;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));
	/* An existing entry with the same enclosure handle gets updated. */
	m = mptsas_enc_lookup(mpt, mep->me_enchdl);
	if (m != NULL) {
		uint8_t *ledp;
		m->me_flags = mep->me_flags;


		/*
		 * If the number of slots and the first slot entry in the
		 * enclosure has not changed, then we don't need to do anything
		 * here. Otherwise, we need to allocate a new array for the LED
		 * status of the slot.
		 */
		if (m->me_fslot == mep->me_fslot &&
		    m->me_nslots == mep->me_nslots)
			return;

		/*
		 * If the number of slots or the first slot has changed, it's
		 * not clear that we're really in a place that we can continue
		 * to honor the existing flags.
		 */
		if (mep->me_nslots > 0) {
			ledp = kmem_zalloc(sizeof (uint8_t) * mep->me_nslots,
			    KM_SLEEP);
		} else {
			ledp = NULL;
		}

		/* Free the old LED state array sized by the OLD slot count. */
		if (m->me_slotleds != NULL) {
			kmem_free(m->me_slotleds, sizeof (uint8_t) *
			    m->me_nslots);
		}
		m->me_slotleds = ledp;
		m->me_fslot = mep->me_fslot;
		m->me_nslots = mep->me_nslots;
		return;
	}

	/* No existing entry: allocate a new one and append it. */
	m = kmem_zalloc(sizeof (*m), KM_SLEEP);
	m->me_enchdl = mep->me_enchdl;
	m->me_flags = mep->me_flags;
	m->me_nslots = mep->me_nslots;
	m->me_fslot = mep->me_fslot;
	if (m->me_nslots > 0) {
		m->me_slotleds = kmem_zalloc(sizeof (uint8_t) * mep->me_nslots,
		    KM_SLEEP);
		/*
		 * It may make sense to optionally flush all of the slots and/or
		 * read the slot status flag here to synchronize between
		 * ourselves and the card. So far, that hasn't been needed
		 * annecdotally when enumerating something new. If we do, we
		 * should kick that off in a taskq potentially.
		 */
	}
	list_insert_tail(&mpt->m_enclosures, m);
}
14799
/*
 * Walk the firmware's config pages (expanders, enclosures and SAS
 * devices) via the GetNextHandle forms and (re)populate the driver's
 * target/SMP hash tables.  Progress handles are stored in the softc so
 * an interrupted traversal can resume; m_done_traverse_* flags mark
 * each phase complete.
 */
static void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t page_address;
	int rval = 0;
	uint16_t dev_handle;
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	/* Phase 1: SMP expanders. */
	dev_handle = mpt->m_smp_devhdl;
	while (mpt->m_done_traverse_smp == 0) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(mpt, &smp_node);
	}

	/*
	 * Loop over enclosures so we can understand what's there.
	 */
	dev_handle = MPTSAS_INVALID_DEVHDL;
	while (mpt->m_done_traverse_enc == 0) {
		mptsas_enclosure_t me;

		page_address = (MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_ENCLOS_PGAD_FORM_MASK) | (uint32_t)dev_handle;

		if (mptsas_get_enclosure_page0(mpt, page_address, &me) !=
		    DDI_SUCCESS) {
			break;
		}
		dev_handle = me.me_enchdl;
		mptsas_enclosure_update(mpt, &me);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	while (mpt->m_done_traverse_dev == 0) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		/* Hard failures abort the traversal; it resumes later. */
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC) ||
		    (rval == DEV_INFO_FAIL_GUID)) {
			break;
		}

		mpt->m_dev_handle = dev_handle;
	}

}
14872
/*
 * Resynchronize the driver's view of the topology after a hard reset:
 * refresh the phymask mapping, invalidate every cached devhdl, then
 * rebuild the hash tables from the firmware's config pages.
 */
void
mptsas_update_driver_data(struct mptsas *mpt)
{
	mptsas_target_t *tp;
	mptsas_smp_t *sp;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	/*
	 * TODO after hard reset, update the driver data structures
	 * 1. update port/phymask mapping table mpt->m_phy_info
	 * 2. invalid all the entries in hash table
	 *    m_devhdl = 0xffff and m_deviceinfo = 0
	 * 3. call sas_device_page/expander_page to update hash table
	 */
	mptsas_update_phymask(mpt);

	/*
	 * Remove all the devhdls for existing entries but leave their
	 * addresses alone.  In update_hashtab() below, we'll find all
	 * targets that are still present and reassociate them with
	 * their potentially new devhdls.  Leaving the targets around in
	 * this fashion allows them to be used on the tx waitq even
	 * while IOC reset is occurring.
	 */
	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		tp->m_deviceinfo = 0;
		tp->m_dr_flag = MPTSAS_DR_INACTIVE;
	}
	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
	    sp = refhash_next(mpt->m_smp_targets, sp)) {
		sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		sp->m_deviceinfo = 0;
	}
	/* Force a full re-traversal of devices, expanders and enclosures. */
	mpt->m_done_traverse_dev = 0;
	mpt->m_done_traverse_smp = 0;
	mpt->m_done_traverse_enc = 0;
	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
	mptsas_update_hashtab(mpt);
}
14915
/*
 * Configure (enumerate) every device that belongs to the iport pdip:
 * first the SMP expander nodes, then the SCSI/SATA targets whose
 * phymask matches the iport's.  An iport whose "phymask" property is 0
 * represents RAID volumes and is handled by mptsas_config_all_viport()
 * instead.
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t *smpdip = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);
	int phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t *psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If any of the device/SMP/enclosure traversals is stale,
	 * rebuild the hash tables from the IOC config pages first.
	 */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp ||
	    !mpt->m_done_traverse_enc) {
		mptsas_update_hashtab(mpt);
	}

	/*
	 * Online each SMP node on this iport.  m_mutex is dropped around
	 * the online call because it may sleep / re-enter the framework;
	 * NOTE(review): the refhash iterator is assumed to stay valid
	 * across the drop — confirm no concurrent removal can occur here.
	 */
	for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
	    psmp = refhash_next(mpt->m_smp_targets, psmp)) {
		phy_mask = psmp->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
	}

	/*
	 * Config each target on this iport; same drop-and-reacquire
	 * pattern as above.
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		phy_mask = ptgt->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}
	}
	mutex_exit(&mpt->m_mutex);
}
14969
14970 static int
14971 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
14972 {
14973 int rval = DDI_FAILURE;
14974 dev_info_t *tdip;
14975
14976 rval = mptsas_config_luns(pdip, ptgt);
14977 if (rval != DDI_SUCCESS) {
14978 /*
14979 * The return value means the SCMD_REPORT_LUNS
14980 * did not execute successfully. The target maybe
14981 * doesn't support such command.
14982 */
14983 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
14984 }
14985 return (rval);
14986 }
14987
14988 /*
14989 * Return fail if not all the childs/paths are freed.
14990 * if there is any path under the HBA, the return value will be always fail
14991 * because we didn't call mdi_pi_free for path
14992 */
14993 static int
14994 mptsas_offline_target(dev_info_t *pdip, char *name)
14995 {
14996 dev_info_t *child = NULL, *prechild = NULL;
14997 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
14998 int tmp_rval, rval = DDI_SUCCESS;
14999 char *addr, *cp;
15000 size_t s;
15001 mptsas_t *mpt = DIP2MPT(pdip);
15002
15003 child = ddi_get_child(pdip);
15004 while (child) {
15005 addr = ddi_get_name_addr(child);
15006 prechild = child;
15007 child = ddi_get_next_sibling(child);
15008
15009 if (addr == NULL) {
15010 continue;
15011 }
15012 if ((cp = strchr(addr, ',')) == NULL) {
15013 continue;
15014 }
15015
15016 s = (uintptr_t)cp - (uintptr_t)addr;
15017
15018 if (strncmp(addr, name, s) != 0) {
15019 continue;
15020 }
15021
15022 tmp_rval = mptsas_offline_lun(prechild, NULL);
15023 if (tmp_rval != DDI_SUCCESS) {
15024 rval = DDI_FAILURE;
15025 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15026 prechild, MPTSAS_DEV_GONE) !=
15027 DDI_PROP_SUCCESS) {
15028 mptsas_log(mpt, CE_WARN, "mptsas driver "
15029 "unable to create property for "
15030 "SAS %s (MPTSAS_DEV_GONE)", addr);
15031 }
15032 }
15033 }
15034
15035 pip = mdi_get_next_client_path(pdip, NULL);
15036 while (pip) {
15037 addr = MDI_PI(pip)->pi_addr;
15038 savepip = pip;
15039 pip = mdi_get_next_client_path(pdip, pip);
15040 if (addr == NULL) {
15041 continue;
15042 }
15043
15044 if ((cp = strchr(addr, ',')) == NULL) {
15045 continue;
15046 }
15047
15048 s = (uintptr_t)cp - (uintptr_t)addr;
15049
15050 if (strncmp(addr, name, s) != 0) {
15051 continue;
15052 }
15053
15054 (void) mptsas_offline_lun(NULL, savepip);
15055 /*
15056 * driver will not invoke mdi_pi_free, so path will not
15057 * be freed forever, return DDI_FAILURE.
15058 */
15059 rval = DDI_FAILURE;
15060 }
15061 return (rval);
15062 }
15063
15064 static int
15065 mptsas_offline_lun(dev_info_t *rdip, mdi_pathinfo_t *rpip)
15066 {
15067 int rval = DDI_FAILURE;
15068
15069 if (rpip != NULL) {
15070 if (MDI_PI_IS_OFFLINE(rpip)) {
15071 rval = DDI_SUCCESS;
15072 } else {
15073 rval = mdi_pi_offline(rpip, 0);
15074 }
15075 } else if (rdip != NULL) {
15076 rval = ndi_devi_offline(rdip,
15077 NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE);
15078 }
15079
15080 return (rval);
15081 }
15082
15083 static dev_info_t *
15084 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
15085 {
15086 dev_info_t *child = NULL;
15087 char *smp_wwn = NULL;
15088
15089 child = ddi_get_child(parent);
15090 while (child) {
15091 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
15092 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
15093 != DDI_SUCCESS) {
15094 child = ddi_get_next_sibling(child);
15095 continue;
15096 }
15097
15098 if (strcmp(smp_wwn, str_wwn) == 0) {
15099 ddi_prop_free(smp_wwn);
15100 break;
15101 }
15102 child = ddi_get_next_sibling(child);
15103 ddi_prop_free(smp_wwn);
15104 }
15105 return (child);
15106 }
15107
15108 static int
15109 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node)
15110 {
15111 int rval = DDI_FAILURE;
15112 char wwn_str[MPTSAS_WWN_STRLEN];
15113 dev_info_t *cdip;
15114
15115 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
15116
15117 cdip = mptsas_find_smp_child(pdip, wwn_str);
15118 if (cdip == NULL)
15119 return (DDI_SUCCESS);
15120
15121 rval = ndi_devi_offline(cdip, NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE);
15122
15123 return (rval);
15124 }
15125
15126 static dev_info_t *
15127 mptsas_find_child(dev_info_t *pdip, char *name)
15128 {
15129 dev_info_t *child = NULL;
15130 char *rname = NULL;
15131 int rval = DDI_FAILURE;
15132
15133 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15134
15135 child = ddi_get_child(pdip);
15136 while (child) {
15137 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
15138 if (rval != DDI_SUCCESS) {
15139 child = ddi_get_next_sibling(child);
15140 bzero(rname, SCSI_MAXNAMELEN);
15141 continue;
15142 }
15143
15144 if (strcmp(rname, name) == 0) {
15145 break;
15146 }
15147 child = ddi_get_next_sibling(child);
15148 bzero(rname, SCSI_MAXNAMELEN);
15149 }
15150
15151 kmem_free(rname, SCSI_MAXNAMELEN);
15152
15153 return (child);
15154 }
15155
15156
15157 static dev_info_t *
15158 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
15159 {
15160 dev_info_t *child = NULL;
15161 char *name = NULL;
15162 char *addr = NULL;
15163
15164 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15165 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15166 (void) sprintf(name, "%016"PRIx64, sasaddr);
15167 (void) sprintf(addr, "w%s,%x", name, lun);
15168 child = mptsas_find_child(pdip, addr);
15169 kmem_free(name, SCSI_MAXNAMELEN);
15170 kmem_free(addr, SCSI_MAXNAMELEN);
15171 return (child);
15172 }
15173
15174 static dev_info_t *
15175 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
15176 {
15177 dev_info_t *child;
15178 char *addr;
15179
15180 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15181 (void) sprintf(addr, "p%x,0", phy);
15182 child = mptsas_find_child(pdip, addr);
15183 kmem_free(addr, SCSI_MAXNAMELEN);
15184 return (child);
15185 }
15186
15187 static mdi_pathinfo_t *
15188 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
15189 {
15190 mdi_pathinfo_t *path;
15191 char *addr = NULL;
15192
15193 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15194 (void) sprintf(addr, "p%x,0", phy);
15195 path = mdi_pi_find(pdip, NULL, addr);
15196 kmem_free(addr, SCSI_MAXNAMELEN);
15197 return (path);
15198 }
15199
15200 static mdi_pathinfo_t *
15201 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
15202 {
15203 mdi_pathinfo_t *path;
15204 char *name = NULL;
15205 char *addr = NULL;
15206
15207 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15208 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15209 (void) sprintf(name, "%016"PRIx64, sasaddr);
15210 (void) sprintf(addr, "w%s,%x", name, lun);
15211 path = mdi_pi_find(parent, NULL, addr);
15212 kmem_free(name, SCSI_MAXNAMELEN);
15213 kmem_free(addr, SCSI_MAXNAMELEN);
15214
15215 return (path);
15216 }
15217
/*
 * Create the devinfo/pathinfo node(s) for one LUN of a target.
 *
 * A device GUID is derived from INQUIRY VPD page 0x83 so the LUN can
 * be enumerated under scsi_vhci (MPxIO).  If a usable GUID is obtained
 * and MPxIO is enabled, a virtual (pathinfo) node is created first;
 * otherwise, or if that fails, a physical child node is created.
 * Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int i = 0;
	uchar_t *inq83 = NULL;
	int inq83_len1 = 0xFF;
	int inq83_len = 0;
	int rval = DDI_FAILURE;
	ddi_devid_t devid;
	char *guid = NULL;
	int target = ptgt->m_devhdl;
	mdi_pathinfo_t *pip = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	/* Retry loop: one INQUIRY page-0x83 attempt per second. */
	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable escape hatch: optionally fall through to
			 * a physical (non-MPxIO) bind when page 0x83 fails.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	/* Loop ran to completion => every attempt returned DEVID_RETRY. */
	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	/*
	 * rval is deliberately reset here (even after a successful devid
	 * encode); the create calls below establish the final status.
	 */
	rval = DDI_FAILURE;

create_lun:
	/* Prefer a virtual (MPxIO) node when we have a GUID. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	/* Fall back to a physical child node. */
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
15341
15342 static int
15343 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
15344 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
15345 {
15346 int target;
15347 char *nodename = NULL;
15348 char **compatible = NULL;
15349 int ncompatible = 0;
15350 int mdi_rtn = MDI_FAILURE;
15351 int rval = DDI_FAILURE;
15352 char *old_guid = NULL;
15353 mptsas_t *mpt = DIP2MPT(pdip);
15354 char *lun_addr = NULL;
15355 char *wwn_str = NULL;
15356 char *attached_wwn_str = NULL;
15357 char *component = NULL;
15358 uint8_t phy = 0xFF;
15359 uint64_t sas_wwn;
15360 int64_t lun64 = 0;
15361 uint32_t devinfo;
15362 uint16_t dev_hdl;
15363 uint16_t pdev_hdl;
15364 uint64_t dev_sas_wwn;
15365 uint64_t pdev_sas_wwn;
15366 uint32_t pdev_info;
15367 uint8_t physport;
15368 uint8_t phy_id;
15369 uint32_t page_address;
15370 uint16_t bay_num, enclosure, io_flags;
15371 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
15372 uint32_t dev_info;
15373
15374 mutex_enter(&mpt->m_mutex);
15375 target = ptgt->m_devhdl;
15376 sas_wwn = ptgt->m_addr.mta_wwn;
15377 devinfo = ptgt->m_deviceinfo;
15378 phy = ptgt->m_phynum;
15379 mutex_exit(&mpt->m_mutex);
15380
15381 if (sas_wwn) {
15382 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
15383 } else {
15384 *pip = mptsas_find_path_phy(pdip, phy);
15385 }
15386
15387 if (*pip != NULL) {
15388 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15389 ASSERT(*lun_dip != NULL);
15390 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
15391 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
15392 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
15393 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
15394 /*
15395 * Same path back online again.
15396 */
15397 (void) ddi_prop_free(old_guid);
15398 if ((!MDI_PI_IS_ONLINE(*pip)) &&
15399 (!MDI_PI_IS_STANDBY(*pip)) &&
15400 (ptgt->m_tgt_unconfigured == 0)) {
15401 rval = mdi_pi_online(*pip, 0);
15402 } else {
15403 rval = DDI_SUCCESS;
15404 }
15405 if (rval != DDI_SUCCESS) {
15406 mptsas_log(mpt, CE_WARN, "path:target: "
15407 "%x, lun:%x online failed!", target,
15408 lun);
15409 *pip = NULL;
15410 *lun_dip = NULL;
15411 }
15412 return (rval);
15413 } else {
15414 /*
15415 * The GUID of the LUN has changed which maybe
15416 * because customer mapped another volume to the
15417 * same LUN.
15418 */
15419 mptsas_log(mpt, CE_WARN, "The GUID of the "
15420 "target:%x, lun:%x was changed, maybe "
15421 "because someone mapped another volume "
15422 "to the same LUN", target, lun);
15423 (void) ddi_prop_free(old_guid);
15424 if (!MDI_PI_IS_OFFLINE(*pip)) {
15425 rval = mdi_pi_offline(*pip, 0);
15426 if (rval != MDI_SUCCESS) {
15427 mptsas_log(mpt, CE_WARN, "path:"
15428 "target:%x, lun:%x offline "
15429 "failed!", target, lun);
15430 *pip = NULL;
15431 *lun_dip = NULL;
15432 return (DDI_FAILURE);
15433 }
15434 }
15435 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
15436 mptsas_log(mpt, CE_WARN, "path:target:"
15437 "%x, lun:%x free failed!", target,
15438 lun);
15439 *pip = NULL;
15440 *lun_dip = NULL;
15441 return (DDI_FAILURE);
15442 }
15443 }
15444 } else {
15445 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
15446 "property for path:target:%x, lun:%x", target, lun);
15447 *pip = NULL;
15448 *lun_dip = NULL;
15449 return (DDI_FAILURE);
15450 }
15451 }
15452 scsi_hba_nodename_compatible_get(inq, NULL,
15453 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
15454
15455 /*
15456 * if nodename can't be determined then print a message and skip it
15457 */
15458 if (nodename == NULL) {
15459 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
15460 "driver for target%d lun %d dtype:0x%02x", target, lun,
15461 inq->inq_dtype);
15462 return (DDI_FAILURE);
15463 }
15464
15465 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15466 /* The property is needed by MPAPI */
15467 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15468
15469 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15470 if (guid) {
15471 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
15472 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15473 } else {
15474 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
15475 (void) sprintf(wwn_str, "p%x", phy);
15476 }
15477
15478 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
15479 guid, lun_addr, compatible, ncompatible,
15480 0, pip);
15481 if (mdi_rtn == MDI_SUCCESS) {
15482
15483 if (mdi_prop_update_string(*pip, MDI_GUID,
15484 guid) != DDI_SUCCESS) {
15485 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15486 "create prop for target %d lun %d (MDI_GUID)",
15487 target, lun);
15488 mdi_rtn = MDI_FAILURE;
15489 goto virt_create_done;
15490 }
15491
15492 if (mdi_prop_update_int(*pip, LUN_PROP,
15493 lun) != DDI_SUCCESS) {
15494 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15495 "create prop for target %d lun %d (LUN_PROP)",
15496 target, lun);
15497 mdi_rtn = MDI_FAILURE;
15498 goto virt_create_done;
15499 }
15500 lun64 = (int64_t)lun;
15501 if (mdi_prop_update_int64(*pip, LUN64_PROP,
15502 lun64) != DDI_SUCCESS) {
15503 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15504 "create prop for target %d (LUN64_PROP)",
15505 target);
15506 mdi_rtn = MDI_FAILURE;
15507 goto virt_create_done;
15508 }
15509 if (mdi_prop_update_string_array(*pip, "compatible",
15510 compatible, ncompatible) !=
15511 DDI_PROP_SUCCESS) {
15512 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15513 "create prop for target %d lun %d (COMPATIBLE)",
15514 target, lun);
15515 mdi_rtn = MDI_FAILURE;
15516 goto virt_create_done;
15517 }
15518 if (sas_wwn && (mdi_prop_update_string(*pip,
15519 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
15520 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15521 "create prop for target %d lun %d "
15522 "(target-port)", target, lun);
15523 mdi_rtn = MDI_FAILURE;
15524 goto virt_create_done;
15525 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
15526 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
15527 /*
15528 * Direct attached SATA device without DeviceName
15529 */
15530 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15531 "create prop for SAS target %d lun %d "
15532 "(sata-phy)", target, lun);
15533 mdi_rtn = MDI_FAILURE;
15534 goto virt_create_done;
15535 }
15536 mutex_enter(&mpt->m_mutex);
15537
15538 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15539 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15540 (uint32_t)ptgt->m_devhdl;
15541 rval = mptsas_get_sas_device_page0(mpt, page_address,
15542 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
15543 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15544 if (rval != DDI_SUCCESS) {
15545 mutex_exit(&mpt->m_mutex);
15546 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15547 "parent device for handle %d", page_address);
15548 mdi_rtn = MDI_FAILURE;
15549 goto virt_create_done;
15550 }
15551
15552 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15553 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15554 rval = mptsas_get_sas_device_page0(mpt, page_address,
15555 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15556 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15557 if (rval != DDI_SUCCESS) {
15558 mutex_exit(&mpt->m_mutex);
15559 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15560 "device info for handle %d", page_address);
15561 mdi_rtn = MDI_FAILURE;
15562 goto virt_create_done;
15563 }
15564
15565 mutex_exit(&mpt->m_mutex);
15566
15567 /*
15568 * If this device direct attached to the controller
15569 * set the attached-port to the base wwid
15570 */
15571 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15572 != DEVINFO_DIRECT_ATTACHED) {
15573 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15574 pdev_sas_wwn);
15575 } else {
15576 /*
15577 * Update the iport's attached-port to guid
15578 */
15579 if (sas_wwn == 0) {
15580 (void) sprintf(wwn_str, "p%x", phy);
15581 } else {
15582 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15583 }
15584 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15585 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15586 DDI_PROP_SUCCESS) {
15587 mptsas_log(mpt, CE_WARN,
15588 "mptsas unable to create "
15589 "property for iport target-port"
15590 " %s (sas_wwn)",
15591 wwn_str);
15592 mdi_rtn = MDI_FAILURE;
15593 goto virt_create_done;
15594 }
15595
15596 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15597 mpt->un.m_base_wwid);
15598 }
15599
15600 if (IS_SATA_DEVICE(ptgt->m_deviceinfo)) {
15601 char uabuf[SCSI_WWN_BUFLEN];
15602
15603 if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
15604 mptsas_log(mpt, CE_WARN,
15605 "mptsas unable to format SATA bridge WWN");
15606 mdi_rtn = MDI_FAILURE;
15607 goto virt_create_done;
15608 }
15609
15610 if (mdi_prop_update_string(*pip,
15611 SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) !=
15612 DDI_SUCCESS) {
15613 mptsas_log(mpt, CE_WARN,
15614 "mptsas unable to create SCSI bridge port "
15615 "property for SATA device");
15616 mdi_rtn = MDI_FAILURE;
15617 goto virt_create_done;
15618 }
15619 }
15620
15621 if (mdi_prop_update_string(*pip,
15622 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15623 DDI_PROP_SUCCESS) {
15624 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15625 "property for iport attached-port %s (sas_wwn)",
15626 attached_wwn_str);
15627 mdi_rtn = MDI_FAILURE;
15628 goto virt_create_done;
15629 }
15630
15631
15632 if (inq->inq_dtype == 0) {
15633 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15634 /*
15635 * set obp path for pathinfo
15636 */
15637 (void) snprintf(component, MAXPATHLEN,
15638 "disk@%s", lun_addr);
15639
15640 if (mdi_pi_pathname_obp_set(*pip, component) !=
15641 DDI_SUCCESS) {
15642 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15643 "unable to set obp-path for object %s",
15644 component);
15645 mdi_rtn = MDI_FAILURE;
15646 goto virt_create_done;
15647 }
15648 }
15649
15650 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15651 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15652 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15653 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
15654 "pm-capable", 1)) !=
15655 DDI_PROP_SUCCESS) {
15656 mptsas_log(mpt, CE_WARN, "mptsas driver"
15657 "failed to create pm-capable "
15658 "property, target %d", target);
15659 mdi_rtn = MDI_FAILURE;
15660 goto virt_create_done;
15661 }
15662 }
15663 /*
15664 * Create the phy-num property
15665 */
15666 if (mdi_prop_update_int(*pip, "phy-num",
15667 ptgt->m_phynum) != DDI_SUCCESS) {
15668 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15669 "create phy-num property for target %d lun %d",
15670 target, lun);
15671 mdi_rtn = MDI_FAILURE;
15672 goto virt_create_done;
15673 }
15674 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
15675 mdi_rtn = mdi_pi_online(*pip, 0);
15676 if (mdi_rtn == MDI_NOT_SUPPORTED) {
15677 mdi_rtn = MDI_FAILURE;
15678 }
15679 virt_create_done:
15680 if (*pip && mdi_rtn != MDI_SUCCESS) {
15681 (void) mdi_pi_free(*pip, 0);
15682 *pip = NULL;
15683 *lun_dip = NULL;
15684 }
15685 }
15686
15687 scsi_hba_nodename_compatible_free(nodename, compatible);
15688 if (lun_addr != NULL) {
15689 kmem_free(lun_addr, SCSI_MAXNAMELEN);
15690 }
15691 if (wwn_str != NULL) {
15692 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15693 }
15694 if (component != NULL) {
15695 kmem_free(component, MAXPATHLEN);
15696 }
15697
15698 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15699 }
15700
15701 static int
15702 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
15703 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
15704 {
15705 int target;
15706 int rval;
15707 int ndi_rtn = NDI_FAILURE;
15708 uint64_t be_sas_wwn;
15709 char *nodename = NULL;
15710 char **compatible = NULL;
15711 int ncompatible = 0;
15712 int instance = 0;
15713 mptsas_t *mpt = DIP2MPT(pdip);
15714 char *wwn_str = NULL;
15715 char *component = NULL;
15716 char *attached_wwn_str = NULL;
15717 uint8_t phy = 0xFF;
15718 uint64_t sas_wwn;
15719 uint32_t devinfo;
15720 uint16_t dev_hdl;
15721 uint16_t pdev_hdl;
15722 uint64_t pdev_sas_wwn;
15723 uint64_t dev_sas_wwn;
15724 uint32_t pdev_info;
15725 uint8_t physport;
15726 uint8_t phy_id;
15727 uint32_t page_address;
15728 uint16_t bay_num, enclosure, io_flags;
15729 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
15730 uint32_t dev_info;
15731 int64_t lun64 = 0;
15732
15733 mutex_enter(&mpt->m_mutex);
15734 target = ptgt->m_devhdl;
15735 sas_wwn = ptgt->m_addr.mta_wwn;
15736 devinfo = ptgt->m_deviceinfo;
15737 phy = ptgt->m_phynum;
15738 mutex_exit(&mpt->m_mutex);
15739
15740 /*
15741 * generate compatible property with binding-set "mpt"
15742 */
15743 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
15744 &nodename, &compatible, &ncompatible);
15745
15746 /*
15747 * if nodename can't be determined then print a message and skip it
15748 */
15749 if (nodename == NULL) {
15750 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
15751 "for target %d lun %d", target, lun);
15752 return (DDI_FAILURE);
15753 }
15754
15755 ndi_rtn = ndi_devi_alloc(pdip, nodename,
15756 DEVI_SID_NODEID, lun_dip);
15757
15758 /*
15759 * if lun alloc success, set props
15760 */
15761 if (ndi_rtn == NDI_SUCCESS) {
15762
15763 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15764 *lun_dip, LUN_PROP, lun) !=
15765 DDI_PROP_SUCCESS) {
15766 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15767 "property for target %d lun %d (LUN_PROP)",
15768 target, lun);
15769 ndi_rtn = NDI_FAILURE;
15770 goto phys_create_done;
15771 }
15772
15773 lun64 = (int64_t)lun;
15774 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
15775 *lun_dip, LUN64_PROP, lun64) !=
15776 DDI_PROP_SUCCESS) {
15777 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15778 "property for target %d lun64 %d (LUN64_PROP)",
15779 target, lun);
15780 ndi_rtn = NDI_FAILURE;
15781 goto phys_create_done;
15782 }
15783 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
15784 *lun_dip, "compatible", compatible, ncompatible)
15785 != DDI_PROP_SUCCESS) {
15786 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15787 "property for target %d lun %d (COMPATIBLE)",
15788 target, lun);
15789 ndi_rtn = NDI_FAILURE;
15790 goto phys_create_done;
15791 }
15792
15793 /*
15794 * We need the SAS WWN for non-multipath devices, so
15795 * we'll use the same property as that multipathing
15796 * devices need to present for MPAPI. If we don't have
15797 * a WWN (e.g. parallel SCSI), don't create the prop.
15798 */
15799 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15800 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15801 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
15802 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
15803 != DDI_PROP_SUCCESS) {
15804 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15805 "create property for SAS target %d lun %d "
15806 "(target-port)", target, lun);
15807 ndi_rtn = NDI_FAILURE;
15808 goto phys_create_done;
15809 }
15810
15811 be_sas_wwn = BE_64(sas_wwn);
15812 if (sas_wwn && ndi_prop_update_byte_array(
15813 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
15814 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
15815 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15816 "create property for SAS target %d lun %d "
15817 "(port-wwn)", target, lun);
15818 ndi_rtn = NDI_FAILURE;
15819 goto phys_create_done;
15820 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
15821 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
15822 DDI_PROP_SUCCESS)) {
15823 /*
15824 * Direct attached SATA device without DeviceName
15825 */
15826 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15827 "create property for SAS target %d lun %d "
15828 "(sata-phy)", target, lun);
15829 ndi_rtn = NDI_FAILURE;
15830 goto phys_create_done;
15831 }
15832
15833 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15834 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
15835 mptsas_log(mpt, CE_WARN, "mptsas unable to"
15836 "create property for SAS target %d lun %d"
15837 " (SAS_PROP)", target, lun);
15838 ndi_rtn = NDI_FAILURE;
15839 goto phys_create_done;
15840 }
15841 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
15842 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
15843 mptsas_log(mpt, CE_WARN, "mptsas unable "
15844 "to create guid property for target %d "
15845 "lun %d", target, lun);
15846 ndi_rtn = NDI_FAILURE;
15847 goto phys_create_done;
15848 }
15849
15850 /*
15851 * The following code is to set properties for SM-HBA support,
15852 * it doesn't apply to RAID volumes
15853 */
15854 if (ptgt->m_addr.mta_phymask == 0)
15855 goto phys_raid_lun;
15856
15857 mutex_enter(&mpt->m_mutex);
15858
15859 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15860 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15861 (uint32_t)ptgt->m_devhdl;
15862 rval = mptsas_get_sas_device_page0(mpt, page_address,
15863 &dev_hdl, &dev_sas_wwn, &dev_info,
15864 &physport, &phy_id, &pdev_hdl,
15865 &bay_num, &enclosure, &io_flags);
15866 if (rval != DDI_SUCCESS) {
15867 mutex_exit(&mpt->m_mutex);
15868 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15869 "parent device for handle %d.", page_address);
15870 ndi_rtn = NDI_FAILURE;
15871 goto phys_create_done;
15872 }
15873
15874 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15875 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15876 rval = mptsas_get_sas_device_page0(mpt, page_address,
15877 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15878 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15879 if (rval != DDI_SUCCESS) {
15880 mutex_exit(&mpt->m_mutex);
15881 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15882 "device for handle %d.", page_address);
15883 ndi_rtn = NDI_FAILURE;
15884 goto phys_create_done;
15885 }
15886
15887 mutex_exit(&mpt->m_mutex);
15888
15889 /*
15890 * If this device direct attached to the controller
15891 * set the attached-port to the base wwid
15892 */
15893 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15894 != DEVINFO_DIRECT_ATTACHED) {
15895 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15896 pdev_sas_wwn);
15897 } else {
15898 /*
15899 * Update the iport's attached-port to guid
15900 */
15901 if (sas_wwn == 0) {
15902 (void) sprintf(wwn_str, "p%x", phy);
15903 } else {
15904 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15905 }
15906 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15907 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15908 DDI_PROP_SUCCESS) {
15909 mptsas_log(mpt, CE_WARN,
15910 "mptsas unable to create "
15911 "property for iport target-port"
15912 " %s (sas_wwn)",
15913 wwn_str);
15914 ndi_rtn = NDI_FAILURE;
15915 goto phys_create_done;
15916 }
15917
15918 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15919 mpt->un.m_base_wwid);
15920 }
15921
15922 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15923 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15924 DDI_PROP_SUCCESS) {
15925 mptsas_log(mpt, CE_WARN,
15926 "mptsas unable to create "
15927 "property for iport attached-port %s (sas_wwn)",
15928 attached_wwn_str);
15929 ndi_rtn = NDI_FAILURE;
15930 goto phys_create_done;
15931 }
15932
15933 if (IS_SATA_DEVICE(dev_info)) {
15934 char uabuf[SCSI_WWN_BUFLEN];
15935
15936 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15937 *lun_dip, MPTSAS_VARIANT, "sata") !=
15938 DDI_PROP_SUCCESS) {
15939 mptsas_log(mpt, CE_WARN,
15940 "mptsas unable to create "
15941 "property for device variant ");
15942 ndi_rtn = NDI_FAILURE;
15943 goto phys_create_done;
15944 }
15945
15946 if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
15947 mptsas_log(mpt, CE_WARN,
15948 "mptsas unable to format SATA bridge WWN");
15949 ndi_rtn = NDI_FAILURE;
15950 goto phys_create_done;
15951 }
15952
15953 if (ndi_prop_update_string(DDI_DEV_T_NONE, *lun_dip,
15954 SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) !=
15955 DDI_PROP_SUCCESS) {
15956 mptsas_log(mpt, CE_WARN,
15957 "mptsas unable to create SCSI bridge port "
15958 "property for SATA device");
15959 ndi_rtn = NDI_FAILURE;
15960 goto phys_create_done;
15961 }
15962 }
15963
15964 if (IS_ATAPI_DEVICE(dev_info)) {
15965 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15966 *lun_dip, MPTSAS_VARIANT, "atapi") !=
15967 DDI_PROP_SUCCESS) {
15968 mptsas_log(mpt, CE_WARN,
15969 "mptsas unable to create "
15970 "property for device variant ");
15971 ndi_rtn = NDI_FAILURE;
15972 goto phys_create_done;
15973 }
15974 }
15975
15976 phys_raid_lun:
15977 /*
15978 * if this is a SAS controller, and the target is a SATA
15979 * drive, set the 'pm-capable' property for sd and if on
15980 * an OPL platform, also check if this is an ATAPI
15981 * device.
15982 */
15983 instance = ddi_get_instance(mpt->m_dip);
15984 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15985 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15986 NDBG2(("mptsas%d: creating pm-capable property, "
15987 "target %d", instance, target));
15988
15989 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
15990 *lun_dip, "pm-capable", 1)) !=
15991 DDI_PROP_SUCCESS) {
15992 mptsas_log(mpt, CE_WARN, "mptsas "
15993 "failed to create pm-capable "
15994 "property, target %d", target);
15995 ndi_rtn = NDI_FAILURE;
15996 goto phys_create_done;
15997 }
15998
15999 }
16000
16001 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
16002 /*
16003 * add 'obp-path' properties for devinfo
16004 */
16005 bzero(wwn_str, sizeof (wwn_str));
16006 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
16007 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
16008 if (guid) {
16009 (void) snprintf(component, MAXPATHLEN,
16010 "disk@w%s,%x", wwn_str, lun);
16011 } else {
16012 (void) snprintf(component, MAXPATHLEN,
16013 "disk@p%x,%x", phy, lun);
16014 }
16015 if (ddi_pathname_obp_set(*lun_dip, component)
16016 != DDI_SUCCESS) {
16017 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
16018 "unable to set obp-path for SAS "
16019 "object %s", component);
16020 ndi_rtn = NDI_FAILURE;
16021 goto phys_create_done;
16022 }
16023 }
16024 /*
16025 * Create the phy-num property for non-raid disk
16026 */
16027 if (ptgt->m_addr.mta_phymask != 0) {
16028 if (ndi_prop_update_int(DDI_DEV_T_NONE,
16029 *lun_dip, "phy-num", ptgt->m_phynum) !=
16030 DDI_PROP_SUCCESS) {
16031 mptsas_log(mpt, CE_WARN, "mptsas driver "
16032 "failed to create phy-num property for "
16033 "target %d", target);
16034 ndi_rtn = NDI_FAILURE;
16035 goto phys_create_done;
16036 }
16037 }
16038 phys_create_done:
16039 /*
16040 * If props were setup ok, online the lun
16041 */
16042 if (ndi_rtn == NDI_SUCCESS) {
16043 /*
16044 * Try to online the new node
16045 */
16046 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
16047 }
16048
16049 /*
16050 * If success set rtn flag, else unwire alloc'd lun
16051 */
16052 if (ndi_rtn != NDI_SUCCESS) {
16053 NDBG12(("mptsas driver unable to online "
16054 "target %d lun %d", target, lun));
16055 ndi_prop_remove_all(*lun_dip);
16056 (void) ndi_devi_free(*lun_dip);
16057 *lun_dip = NULL;
16058 }
16059 }
16060
16061 scsi_hba_nodename_compatible_free(nodename, compatible);
16062
16063 if (wwn_str != NULL) {
16064 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
16065 }
16066 if (component != NULL) {
16067 kmem_free(component, MAXPATHLEN);
16068 }
16069
16070
16071 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
16072 }
16073
16074 static int
16075 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
16076 {
16077 mptsas_t *mpt = DIP2MPT(pdip);
16078 struct smp_device smp_sd;
16079
16080 /* XXX An HBA driver should not be allocating an smp_device. */
16081 bzero(&smp_sd, sizeof (struct smp_device));
16082 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
16083 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
16084
16085 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
16086 return (NDI_FAILURE);
16087 return (NDI_SUCCESS);
16088 }
16089
16090 static int
16091 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
16092 {
16093 mptsas_t *mpt = DIP2MPT(pdip);
16094 mptsas_smp_t *psmp = NULL;
16095 int rval;
16096 int phymask;
16097
16098 /*
16099 * Get the physical port associated to the iport
16100 * PHYMASK TODO
16101 */
16102 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
16103 "phymask", 0);
16104 /*
16105 * Find the smp node in hash table with specified sas address and
16106 * physical port
16107 */
16108 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
16109 if (psmp == NULL) {
16110 return (DDI_FAILURE);
16111 }
16112
16113 rval = mptsas_online_smp(pdip, psmp, smp_dip);
16114
16115 return (rval);
16116 }
16117
16118 static int
16119 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
16120 dev_info_t **smp_dip)
16121 {
16122 char wwn_str[MPTSAS_WWN_STRLEN];
16123 char attached_wwn_str[MPTSAS_WWN_STRLEN];
16124 int ndi_rtn = NDI_FAILURE;
16125 int rval = 0;
16126 mptsas_smp_t dev_info;
16127 uint32_t page_address;
16128 mptsas_t *mpt = DIP2MPT(pdip);
16129 uint16_t dev_hdl;
16130 uint64_t sas_wwn;
16131 uint64_t smp_sas_wwn;
16132 uint8_t physport;
16133 uint8_t phy_id;
16134 uint16_t pdev_hdl;
16135 uint8_t numphys = 0;
16136 uint16_t i = 0;
16137 char phymask[MPTSAS_MAX_PHYS];
16138 char *iport = NULL;
16139 mptsas_phymask_t phy_mask = 0;
16140 uint16_t attached_devhdl;
16141 uint16_t bay_num, enclosure, io_flags;
16142
16143 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
16144
16145 /*
16146 * Probe smp device, prevent the node of removed device from being
16147 * configured succesfully
16148 */
16149 if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
16150 return (DDI_FAILURE);
16151 }
16152
16153 if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
16154 return (DDI_SUCCESS);
16155 }
16156
16157 ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);
16158
16159 /*
16160 * if lun alloc success, set props
16161 */
16162 if (ndi_rtn == NDI_SUCCESS) {
16163 /*
16164 * Set the flavor of the child to be SMP flavored
16165 */
16166 ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);
16167
16168 if (ndi_prop_update_string(DDI_DEV_T_NONE,
16169 *smp_dip, SMP_WWN, wwn_str) !=
16170 DDI_PROP_SUCCESS) {
16171 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16172 "property for smp device %s (sas_wwn)",
16173 wwn_str);
16174 ndi_rtn = NDI_FAILURE;
16175 goto smp_create_done;
16176 }
16177 (void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
16178 if (ndi_prop_update_string(DDI_DEV_T_NONE,
16179 *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
16180 DDI_PROP_SUCCESS) {
16181 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16182 "property for iport target-port %s (sas_wwn)",
16183 wwn_str);
16184 ndi_rtn = NDI_FAILURE;
16185 goto smp_create_done;
16186 }
16187
16188 mutex_enter(&mpt->m_mutex);
16189
16190 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
16191 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
16192 rval = mptsas_get_sas_expander_page0(mpt, page_address,
16193 &dev_info);
16194 if (rval != DDI_SUCCESS) {
16195 mutex_exit(&mpt->m_mutex);
16196 mptsas_log(mpt, CE_WARN,
16197 "mptsas unable to get expander "
16198 "parent device info for %x", page_address);
16199 ndi_rtn = NDI_FAILURE;
16200 goto smp_create_done;
16201 }
16202
16203 smp_node->m_pdevhdl = dev_info.m_pdevhdl;
16204 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
16205 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
16206 (uint32_t)dev_info.m_pdevhdl;
16207 rval = mptsas_get_sas_device_page0(mpt, page_address,
16208 &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo, &physport,
16209 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
16210 if (rval != DDI_SUCCESS) {
16211 mutex_exit(&mpt->m_mutex);
16212 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
16213 "device info for %x", page_address);
16214 ndi_rtn = NDI_FAILURE;
16215 goto smp_create_done;
16216 }
16217
16218 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
16219 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
16220 (uint32_t)dev_info.m_devhdl;
16221 rval = mptsas_get_sas_device_page0(mpt, page_address,
16222 &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
16223 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure,
16224 &io_flags);
16225 if (rval != DDI_SUCCESS) {
16226 mutex_exit(&mpt->m_mutex);
16227 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
16228 "device info for %x", page_address);
16229 ndi_rtn = NDI_FAILURE;
16230 goto smp_create_done;
16231 }
16232 mutex_exit(&mpt->m_mutex);
16233
16234 /*
16235 * If this smp direct attached to the controller
16236 * set the attached-port to the base wwid
16237 */
16238 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
16239 != DEVINFO_DIRECT_ATTACHED) {
16240 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
16241 sas_wwn);
16242 } else {
16243 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
16244 mpt->un.m_base_wwid);
16245 }
16246
16247 if (ndi_prop_update_string(DDI_DEV_T_NONE,
16248 *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
16249 DDI_PROP_SUCCESS) {
16250 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16251 "property for smp attached-port %s (sas_wwn)",
16252 attached_wwn_str);
16253 ndi_rtn = NDI_FAILURE;
16254 goto smp_create_done;
16255 }
16256
16257 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
16258 *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
16259 mptsas_log(mpt, CE_WARN, "mptsas unable to "
16260 "create property for SMP %s (SMP_PROP) ",
16261 wwn_str);
16262 ndi_rtn = NDI_FAILURE;
16263 goto smp_create_done;
16264 }
16265
16266 /*
16267 * check the smp to see whether it direct
16268 * attached to the controller
16269 */
16270 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
16271 != DEVINFO_DIRECT_ATTACHED) {
16272 goto smp_create_done;
16273 }
16274 numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
16275 DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
16276 if (numphys > 0) {
16277 goto smp_create_done;
16278 }
16279 /*
16280 * this iport is an old iport, we need to
16281 * reconfig the props for it.
16282 */
16283 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
16284 MPTSAS_VIRTUAL_PORT, 0) !=
16285 DDI_PROP_SUCCESS) {
16286 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
16287 MPTSAS_VIRTUAL_PORT);
16288 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
16289 "prop update failed");
16290 goto smp_create_done;
16291 }
16292
16293 mutex_enter(&mpt->m_mutex);
16294 numphys = 0;
16295 iport = ddi_get_name_addr(pdip);
16296 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16297 bzero(phymask, sizeof (phymask));
16298 (void) sprintf(phymask,
16299 "%x", mpt->m_phy_info[i].phy_mask);
16300 if (strcmp(phymask, iport) == 0) {
16301 phy_mask = mpt->m_phy_info[i].phy_mask;
16302 break;
16303 }
16304 }
16305
16306 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16307 if ((phy_mask >> i) & 0x01) {
16308 numphys++;
16309 }
16310 }
16311 /*
16312 * Update PHY info for smhba
16313 */
16314 if (mptsas_smhba_phy_init(mpt)) {
16315 mutex_exit(&mpt->m_mutex);
16316 mptsas_log(mpt, CE_WARN, "mptsas phy update "
16317 "failed");
16318 goto smp_create_done;
16319 }
16320 mutex_exit(&mpt->m_mutex);
16321
16322 mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
16323 &attached_devhdl);
16324
16325 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
16326 MPTSAS_NUM_PHYS, numphys) !=
16327 DDI_PROP_SUCCESS) {
16328 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
16329 MPTSAS_NUM_PHYS);
16330 mptsas_log(mpt, CE_WARN, "mptsas update "
16331 "num phys props failed");
16332 goto smp_create_done;
16333 }
16334 /*
16335 * Add parent's props for SMHBA support
16336 */
16337 if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
16338 SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
16339 DDI_PROP_SUCCESS) {
16340 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
16341 SCSI_ADDR_PROP_ATTACHED_PORT);
16342 mptsas_log(mpt, CE_WARN, "mptsas update iport"
16343 "attached-port failed");
16344 goto smp_create_done;
16345 }
16346
16347 smp_create_done:
16348 /*
16349 * If props were setup ok, online the lun
16350 */
16351 if (ndi_rtn == NDI_SUCCESS) {
16352 /*
16353 * Try to online the new node
16354 */
16355 ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
16356 }
16357
16358 /*
16359 * If success set rtn flag, else unwire alloc'd lun
16360 */
16361 if (ndi_rtn != NDI_SUCCESS) {
16362 NDBG12(("mptsas unable to online "
16363 "SMP target %s", wwn_str));
16364 ndi_prop_remove_all(*smp_dip);
16365 (void) ndi_devi_free(*smp_dip);
16366 }
16367 }
16368
16369 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
16370 }
16371
/*
 * smp transport routine: translate an smp_pkt from the SMP framework into
 * an MPI2 SMP passthrough request and run it synchronously through
 * mptsas_do_passthru().  On error, smp_pkt_reason is set to an errno-style
 * code and DDI_FAILURE is returned; DDI_SUCCESS means both the IOC and
 * SAS status were good.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t wwn;
	Mpi2SmpPassthroughRequest_t req;
	Mpi2SmpPassthroughReply_t rep;
	uint32_t direction = 0;
	mptsas_t *mpt;
	int ret;
	uint64_t tmp64;

	/* Recover the driver soft state from the SMP address' transport. */
	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	/* NOTE(review): 0xff presumably means "any port" -- confirm vs MPI2. */
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* The request must fit the 16-bit RequestDataLength field. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * NOTE(review): the 4 bytes subtracted here (and from the sizes
	 * below) are presumably the SMP CRC handled by the IOC -- confirm
	 * against the MPI2 specification.
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	/* Mark the transfer directions actually used by this packet. */
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* Map the MPI2 IOC status to an errno for the framework. */
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	/* IOC status was good; the SAS-level status must also be good. */
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
16456
16457 /*
16458 * If we didn't get a match, we need to get sas page0 for each device, and
16459 * untill we get a match. If failed, return NULL
16460 */
16461 static mptsas_target_t *
16462 mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
16463 {
16464 int i, j = 0;
16465 int rval = 0;
16466 uint16_t cur_handle;
16467 uint32_t page_address;
16468 mptsas_target_t *ptgt = NULL;
16469
16470 /*
16471 * PHY named device must be direct attached and attaches to
16472 * narrow port, if the iport is not parent of the device which
16473 * we are looking for.
16474 */
16475 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16476 if ((1 << i) & phymask)
16477 j++;
16478 }
16479
16480 if (j > 1)
16481 return (NULL);
16482
16483 /*
16484 * Must be a narrow port and single device attached to the narrow port
16485 * So the physical port num of device which is equal to the iport's
16486 * port num is the device what we are looking for.
16487 */
16488
16489 if (mpt->m_phy_info[phy].phy_mask != phymask)
16490 return (NULL);
16491
16492 mutex_enter(&mpt->m_mutex);
16493
16494 ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
16495 &phy);
16496 if (ptgt != NULL) {
16497 mutex_exit(&mpt->m_mutex);
16498 return (ptgt);
16499 }
16500
16501 if (mpt->m_done_traverse_dev) {
16502 mutex_exit(&mpt->m_mutex);
16503 return (NULL);
16504 }
16505
16506 /* If didn't get a match, come here */
16507 cur_handle = mpt->m_dev_handle;
16508 for (; ; ) {
16509 ptgt = NULL;
16510 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
16511 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
16512 rval = mptsas_get_target_device_info(mpt, page_address,
16513 &cur_handle, &ptgt);
16514 if ((rval == DEV_INFO_FAIL_PAGE0) ||
16515 (rval == DEV_INFO_FAIL_ALLOC) ||
16516 (rval == DEV_INFO_FAIL_GUID)) {
16517 break;
16518 }
16519 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
16520 (rval == DEV_INFO_PHYS_DISK)) {
16521 continue;
16522 }
16523 mpt->m_dev_handle = cur_handle;
16524
16525 if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
16526 break;
16527 }
16528 }
16529
16530 mutex_exit(&mpt->m_mutex);
16531 return (ptgt);
16532 }
16533
16534 /*
16535 * The ptgt->m_addr.mta_wwn contains the wwid for each disk.
16536 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
16537 * If we didn't get a match, we need to get sas page0 for each device, and
16538 * untill we get a match
16539 * If failed, return NULL
16540 */
16541 static mptsas_target_t *
16542 mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
16543 {
16544 int rval = 0;
16545 uint16_t cur_handle;
16546 uint32_t page_address;
16547 mptsas_target_t *tmp_tgt = NULL;
16548 mptsas_target_addr_t addr;
16549
16550 addr.mta_wwn = wwid;
16551 addr.mta_phymask = phymask;
16552 mutex_enter(&mpt->m_mutex);
16553 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16554 if (tmp_tgt != NULL) {
16555 mutex_exit(&mpt->m_mutex);
16556 return (tmp_tgt);
16557 }
16558
16559 if (phymask == 0) {
16560 /*
16561 * It's IR volume
16562 */
16563 rval = mptsas_get_raid_info(mpt);
16564 if (rval) {
16565 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16566 }
16567 mutex_exit(&mpt->m_mutex);
16568 return (tmp_tgt);
16569 }
16570
16571 if (mpt->m_done_traverse_dev) {
16572 mutex_exit(&mpt->m_mutex);
16573 return (NULL);
16574 }
16575
16576 /* If didn't get a match, come here */
16577 cur_handle = mpt->m_dev_handle;
16578 for (;;) {
16579 tmp_tgt = NULL;
16580 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
16581 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
16582 rval = mptsas_get_target_device_info(mpt, page_address,
16583 &cur_handle, &tmp_tgt);
16584 if ((rval == DEV_INFO_FAIL_PAGE0) ||
16585 (rval == DEV_INFO_FAIL_ALLOC) ||
16586 (rval == DEV_INFO_FAIL_GUID)) {
16587 tmp_tgt = NULL;
16588 break;
16589 }
16590 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
16591 (rval == DEV_INFO_PHYS_DISK)) {
16592 continue;
16593 }
16594 mpt->m_dev_handle = cur_handle;
16595 if ((tmp_tgt->m_addr.mta_wwn) &&
16596 (tmp_tgt->m_addr.mta_wwn == wwid) &&
16597 (tmp_tgt->m_addr.mta_phymask == phymask)) {
16598 break;
16599 }
16600 }
16601
16602 mutex_exit(&mpt->m_mutex);
16603 return (tmp_tgt);
16604 }
16605
16606 static mptsas_smp_t *
16607 mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
16608 {
16609 int rval = 0;
16610 uint16_t cur_handle;
16611 uint32_t page_address;
16612 mptsas_smp_t smp_node, *psmp = NULL;
16613 mptsas_target_addr_t addr;
16614
16615 addr.mta_wwn = wwid;
16616 addr.mta_phymask = phymask;
16617 mutex_enter(&mpt->m_mutex);
16618 psmp = refhash_lookup(mpt->m_smp_targets, &addr);
16619 if (psmp != NULL) {
16620 mutex_exit(&mpt->m_mutex);
16621 return (psmp);
16622 }
16623
16624 if (mpt->m_done_traverse_smp) {
16625 mutex_exit(&mpt->m_mutex);
16626 return (NULL);
16627 }
16628
16629 /* If didn't get a match, come here */
16630 cur_handle = mpt->m_smp_devhdl;
16631 for (;;) {
16632 psmp = NULL;
16633 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
16634 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
16635 rval = mptsas_get_sas_expander_page0(mpt, page_address,
16636 &smp_node);
16637 if (rval != DDI_SUCCESS) {
16638 break;
16639 }
16640 mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
16641 psmp = mptsas_smp_alloc(mpt, &smp_node);
16642 ASSERT(psmp);
16643 if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
16644 (psmp->m_addr.mta_phymask == phymask)) {
16645 break;
16646 }
16647 }
16648
16649 mutex_exit(&mpt->m_mutex);
16650 return (psmp);
16651 }
16652
16653 mptsas_target_t *
16654 mptsas_tgt_alloc(refhash_t *refhash, uint16_t devhdl, uint64_t wwid,
16655 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
16656 {
16657 mptsas_target_t *tmp_tgt = NULL;
16658 mptsas_target_addr_t addr;
16659
16660 addr.mta_wwn = wwid;
16661 addr.mta_phymask = phymask;
16662 tmp_tgt = refhash_lookup(refhash, &addr);
16663 if (tmp_tgt != NULL) {
16664 NDBG20(("Hash item already exist"));
16665 tmp_tgt->m_deviceinfo = devinfo;
16666 tmp_tgt->m_devhdl = devhdl; /* XXX - duplicate? */
16667 return (tmp_tgt);
16668 }
16669 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
16670 if (tmp_tgt == NULL) {
16671 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
16672 return (NULL);
16673 }
16674 tmp_tgt->m_devhdl = devhdl;
16675 tmp_tgt->m_addr.mta_wwn = wwid;
16676 tmp_tgt->m_deviceinfo = devinfo;
16677 tmp_tgt->m_addr.mta_phymask = phymask;
16678 tmp_tgt->m_phynum = phynum;
16679 /* Initialized the tgt structure */
16680 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
16681 tmp_tgt->m_qfull_retry_interval =
16682 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
16683 tmp_tgt->m_t_throttle = MAX_THROTTLE;
16684 TAILQ_INIT(&tmp_tgt->m_active_cmdq);
16685
16686 refhash_insert(refhash, tmp_tgt);
16687
16688 return (tmp_tgt);
16689 }
16690
16691 static void
16692 mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
16693 {
16694 dst->m_devhdl = src->m_devhdl;
16695 dst->m_deviceinfo = src->m_deviceinfo;
16696 dst->m_pdevhdl = src->m_pdevhdl;
16697 dst->m_pdevinfo = src->m_pdevinfo;
16698 }
16699
16700 static mptsas_smp_t *
16701 mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
16702 {
16703 mptsas_target_addr_t addr;
16704 mptsas_smp_t *ret_data;
16705
16706 addr.mta_wwn = data->m_addr.mta_wwn;
16707 addr.mta_phymask = data->m_addr.mta_phymask;
16708 ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
16709 /*
16710 * If there's already a matching SMP target, update its fields
16711 * in place. Since the address is not changing, it's safe to do
16712 * this. We cannot just bcopy() here because the structure we've
16713 * been given has invalid hash links.
16714 */
16715 if (ret_data != NULL) {
16716 mptsas_smp_target_copy(data, ret_data);
16717 return (ret_data);
16718 }
16719
16720 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
16721 bcopy(data, ret_data, sizeof (mptsas_smp_t));
16722 refhash_insert(mpt->m_smp_targets, ret_data);
16723 return (ret_data);
16724 }
16725
16726 /*
16727 * Functions for SGPIO LED support
16728 */
16729 static dev_info_t *
16730 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
16731 {
16732 dev_info_t *dip;
16733 int prop;
16734 dip = e_ddi_hold_devi_by_dev(dev, 0);
16735 if (dip == NULL)
16736 return (dip);
16737 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
16738 "phymask", 0);
16739 *phymask = (mptsas_phymask_t)prop;
16740 ddi_release_devi(dip);
16741 return (dip);
16742 }
16743 static mptsas_target_t *
16744 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16745 {
16746 uint8_t phynum;
16747 uint64_t wwn;
16748 int lun;
16749 mptsas_target_t *ptgt = NULL;
16750
16751 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16752 return (NULL);
16753 }
16754 if (addr[0] == 'w') {
16755 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16756 } else {
16757 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16758 }
16759 return (ptgt);
16760 }
16761
16762 static int
16763 mptsas_flush_led_status(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx)
16764 {
16765 uint32_t slotstatus = 0;
16766
16767 ASSERT3U(idx, <, mep->me_nslots);
16768
16769 /* Build an MPI2 Slot Status based on our view of the world */
16770 if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16771 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16772 if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16773 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16774 if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16775 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
16776
16777 /* Write it to the controller */
16778 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16779 slotstatus, idx + mep->me_fslot));
16780 return (mptsas_send_sep(mpt, mep, idx, &slotstatus,
16781 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16782 }
16783
16784 /*
16785 * send sep request, use enclosure/slot addressing
16786 */
16787 static int
16788 mptsas_send_sep(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx,
16789 uint32_t *status, uint8_t act)
16790 {
16791 Mpi2SepRequest_t req;
16792 Mpi2SepReply_t rep;
16793 int ret;
16794 uint16_t enctype;
16795 uint16_t slot;
16796
16797 ASSERT(mutex_owned(&mpt->m_mutex));
16798
16799 /*
16800 * Look through the enclosures and make sure that this enclosure is
16801 * something that is directly attached device. If we didn't find an
16802 * enclosure for this device, don't send the ioctl.
16803 */
16804 enctype = mep->me_flags & MPI2_SAS_ENCLS0_FLAGS_MNG_MASK;
16805 if (enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES &&
16806 enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO &&
16807 enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO) {
16808 return (ENOTTY);
16809 }
16810 slot = idx + mep->me_fslot;
16811
16812 bzero(&req, sizeof (req));
16813 bzero(&rep, sizeof (rep));
16814
16815 req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
16816 req.Action = act;
16817 req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
16818 req.EnclosureHandle = LE_16(mep->me_enchdl);
16819 req.Slot = LE_16(slot);
16820 if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16821 req.SlotStatus = LE_32(*status);
16822 }
16823 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
16824 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
16825 if (ret != 0) {
16826 mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
16827 "Processor Request message error %d", ret);
16828 return (ret);
16829 }
16830 /* do passthrough success, check the ioc status */
16831 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
16832 mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
16833 "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
16834 LE_32(rep.IOCLogInfo));
16835 switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
16836 case MPI2_IOCSTATUS_INVALID_FUNCTION:
16837 case MPI2_IOCSTATUS_INVALID_VPID:
16838 case MPI2_IOCSTATUS_INVALID_FIELD:
16839 case MPI2_IOCSTATUS_INVALID_STATE:
16840 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
16841 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
16842 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
16843 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
16844 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
16845 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
16846 return (EINVAL);
16847 case MPI2_IOCSTATUS_BUSY:
16848 return (EBUSY);
16849 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
16850 return (EAGAIN);
16851 case MPI2_IOCSTATUS_INVALID_SGL:
16852 case MPI2_IOCSTATUS_INTERNAL_ERROR:
16853 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
16854 default:
16855 return (EIO);
16856 }
16857 }
16858 if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16859 *status = LE_32(rep.SlotStatus);
16860 }
16861
16862 return (0);
16863 }
16864
16865 int
16866 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
16867 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
16868 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
16869 {
16870 ddi_dma_cookie_t new_cookie;
16871 size_t alloc_len;
16872 uint_t ncookie;
16873
16874 if (cookiep == NULL)
16875 cookiep = &new_cookie;
16876
16877 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
16878 NULL, dma_hdp) != DDI_SUCCESS) {
16879 return (FALSE);
16880 }
16881
16882 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
16883 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
16884 acc_hdp) != DDI_SUCCESS) {
16885 ddi_dma_free_handle(dma_hdp);
16886 *dma_hdp = NULL;
16887 return (FALSE);
16888 }
16889
16890 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
16891 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
16892 cookiep, &ncookie) != DDI_DMA_MAPPED) {
16893 (void) ddi_dma_mem_free(acc_hdp);
16894 ddi_dma_free_handle(dma_hdp);
16895 *dma_hdp = NULL;
16896 return (FALSE);
16897 }
16898
16899 return (TRUE);
16900 }
16901
16902 void
16903 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
16904 {
16905 if (*dma_hdp == NULL)
16906 return;
16907
16908 (void) ddi_dma_unbind_handle(*dma_hdp);
16909 (void) ddi_dma_mem_free(acc_hdp);
16910 ddi_dma_free_handle(dma_hdp);
16911 *dma_hdp = NULL;
16912 }