Print this page
OS-1997 mpt_sas: expose LED controls to libtopo
Reviewed by: Keith Wesolowski <keith.wesolowski@joyent.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
+++ new/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
25 25 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
26 26 */
27 27
28 28 /*
29 29 * Copyright (c) 2000 to 2010, LSI Corporation.
30 30 * All rights reserved.
31 31 *
32 32 * Redistribution and use in source and binary forms of all code within
33 33 * this file that is exclusively owned by LSI, with or without
34 34 * modification, is permitted provided that, in addition to the CDDL 1.0
35 35 * License requirements, the following conditions are met:
36 36 *
37 37 * Neither the name of the author nor the names of its contributors may be
38 38 * used to endorse or promote products derived from this software without
39 39 * specific prior written permission.
40 40 *
41 41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
44 44 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
45 45 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
46 46 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
47 47 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
48 48 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
49 49 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
50 50 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
51 51 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
52 52 * DAMAGE.
53 53 */
54 54
55 55 /*
56 56 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
57 57 *
58 58 */
59 59
60 60 #if defined(lint) || defined(DEBUG)
61 61 #define MPTSAS_DEBUG
62 62 #endif
63 63
64 64 /*
65 65 * standard header files.
66 66 */
67 67 #include <sys/note.h>
68 68 #include <sys/scsi/scsi.h>
69 69 #include <sys/pci.h>
70 70 #include <sys/file.h>
71 71 #include <sys/cpuvar.h>
72 72 #include <sys/policy.h>
73 73 #include <sys/model.h>
74 74 #include <sys/sysevent.h>
75 75 #include <sys/sysevent/eventdefs.h>
76 76 #include <sys/sysevent/dr.h>
77 77 #include <sys/sata/sata_defs.h>
78 78 #include <sys/scsi/generic/sas.h>
79 79 #include <sys/scsi/impl/scsi_sas.h>
80 80
81 81 #pragma pack(1)
82 82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
83 83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
84 84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
85 85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
86 86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
87 87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
88 88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
89 89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
90 90 #pragma pack()
91 91
92 92 /*
93 93 * private header files.
94 94 *
95 95 */
96 96 #include <sys/scsi/impl/scsi_reset_notify.h>
97 97 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
98 98 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
99 99 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
100 100
101 101 #include <sys/raidioctl.h>
102 102
103 103 #include <sys/fs/dv_node.h> /* devfs_clean */
104 104
105 105 /*
106 106 * FMA header files
107 107 */
108 108 #include <sys/ddifm.h>
109 109 #include <sys/fm/protocol.h>
110 110 #include <sys/fm/util.h>
111 111 #include <sys/fm/io/ddi.h>
112 112
/*
 * Anyone modifying the mptsas driver must be aware that since snv_145,
 * where CR6910752 (mpt_sas driver performance can be improved) was
 * integrated, the per-instance mutex m_mutex is not held in the key
 * I/O code path, including mptsas_scsi_start(), mptsas_intr() and all
 * of the functions they call recursively.  Do not take it for granted
 * that all operations are correctly synchronized.  Before modifying the
 * key code path, or any other code path such as DR, watchsubr, ioctl,
 * passthrough, etc., make sure the elements being modified have no
 * relationship to the elements used in the fastpath (function
 * mptsas_handle_io_fastpath()) in the ISR and its recursive functions;
 * otherwise, the newly introduced mutex must be used to protect them.
 * For how to do this correctly, refer to the comments in mptsas_intr().
 */
127 127
128 128 /*
129 129 * autoconfiguration data and routines.
130 130 */
131 131 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
132 132 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
133 133 static int mptsas_power(dev_info_t *dip, int component, int level);
134 134
135 135 /*
136 136 * cb_ops function
137 137 */
138 138 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
139 139 cred_t *credp, int *rval);
140 140 #ifdef __sparc
141 141 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
142 142 #else /* __sparc */
143 143 static int mptsas_quiesce(dev_info_t *devi);
144 144 #endif /* __sparc */
145 145
146 146 /*
147 147 * Resource initilaization for hardware
148 148 */
149 149 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
150 150 static void mptsas_disable_bus_master(mptsas_t *mpt);
151 151 static void mptsas_hba_fini(mptsas_t *mpt);
152 152 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
153 153 static int mptsas_hba_setup(mptsas_t *mpt);
154 154 static void mptsas_hba_teardown(mptsas_t *mpt);
155 155 static int mptsas_config_space_init(mptsas_t *mpt);
156 156 static void mptsas_config_space_fini(mptsas_t *mpt);
157 157 static void mptsas_iport_register(mptsas_t *mpt);
158 158 static int mptsas_smp_setup(mptsas_t *mpt);
159 159 static void mptsas_smp_teardown(mptsas_t *mpt);
160 160 static int mptsas_cache_create(mptsas_t *mpt);
161 161 static void mptsas_cache_destroy(mptsas_t *mpt);
162 162 static int mptsas_alloc_request_frames(mptsas_t *mpt);
163 163 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
164 164 static int mptsas_alloc_free_queue(mptsas_t *mpt);
165 165 static int mptsas_alloc_post_queue(mptsas_t *mpt);
166 166 static void mptsas_alloc_reply_args(mptsas_t *mpt);
167 167 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
168 168 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
169 169 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
170 170
171 171 /*
172 172 * SCSA function prototypes
173 173 */
174 174 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
175 175 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
176 176 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
177 177 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
178 178 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
179 179 int tgtonly);
180 180 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
181 181 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
182 182 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
183 183 int tgtlen, int flags, int (*callback)(), caddr_t arg);
184 184 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
185 185 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
186 186 struct scsi_pkt *pkt);
187 187 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
188 188 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
189 189 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
190 190 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
191 191 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
192 192 void (*callback)(caddr_t), caddr_t arg);
193 193 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
194 194 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
195 195 static int mptsas_scsi_quiesce(dev_info_t *dip);
196 196 static int mptsas_scsi_unquiesce(dev_info_t *dip);
197 197 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
198 198 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
199 199
200 200 /*
201 201 * SMP functions
202 202 */
203 203 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
204 204
205 205 /*
206 206 * internal function prototypes.
207 207 */
208 208 static void mptsas_list_add(mptsas_t *mpt);
209 209 static void mptsas_list_del(mptsas_t *mpt);
210 210
211 211 static int mptsas_quiesce_bus(mptsas_t *mpt);
212 212 static int mptsas_unquiesce_bus(mptsas_t *mpt);
213 213
214 214 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
215 215 static void mptsas_free_handshake_msg(mptsas_t *mpt);
216 216
217 217 static void mptsas_ncmds_checkdrain(void *arg);
218 218
219 219 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
220 220 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
221 221
222 222 static int mptsas_do_detach(dev_info_t *dev);
223 223 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
224 224 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
225 225 struct scsi_pkt *pkt);
226 226 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
227 227
228 228 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
229 229 static void mptsas_handle_event(void *args);
230 230 static int mptsas_handle_event_sync(void *args);
231 231 static void mptsas_handle_dr(void *args);
232 232 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
233 233 dev_info_t *pdip);
234 234
235 235 static void mptsas_restart_cmd(void *);
236 236
237 237 static void mptsas_flush_hba(mptsas_t *mpt);
238 238 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
239 239 uint8_t tasktype);
240 240 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
241 241 uchar_t reason, uint_t stat);
242 242
243 243 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
244 244 static void mptsas_process_intr(mptsas_t *mpt,
245 245 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
246 246 static int mptsas_handle_io_fastpath(mptsas_t *mpt, uint16_t SMID);
247 247 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
248 248 pMpi2ReplyDescriptorsUnion_t reply_desc);
249 249 static void mptsas_handle_address_reply(mptsas_t *mpt,
250 250 pMpi2ReplyDescriptorsUnion_t reply_desc);
251 251 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
252 252 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
253 253 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
254 254
255 255 static void mptsas_watch(void *arg);
256 256 static void mptsas_watchsubr(mptsas_t *mpt);
257 257 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
258 258
259 259 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
260 260 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
261 261 uint8_t *data, uint32_t request_size, uint32_t reply_size,
262 262 uint32_t data_size, uint32_t direction, uint8_t *dataout,
263 263 uint32_t dataout_size, short timeout, int mode);
264 264 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
265 265
266 266 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
267 267 uint32_t unique_id);
268 268 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
269 269 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
270 270 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
271 271 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
272 272 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
273 273 uint32_t diag_type);
274 274 static int mptsas_diag_register(mptsas_t *mpt,
275 275 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
276 276 static int mptsas_diag_unregister(mptsas_t *mpt,
277 277 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
278 278 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
279 279 uint32_t *return_code);
280 280 static int mptsas_diag_read_buffer(mptsas_t *mpt,
281 281 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
282 282 uint32_t *return_code, int ioctl_mode);
283 283 static int mptsas_diag_release(mptsas_t *mpt,
284 284 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
285 285 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
286 286 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
287 287 int ioctl_mode);
288 288 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
289 289 int mode);
290 290
291 291 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
292 292 int cmdlen, int tgtlen, int statuslen, int kf);
293 293 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
294 294
295 295 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
296 296 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
297 297
298 298 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
299 299 int kmflags);
300 300 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
301 301
302 302 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
303 303 mptsas_cmd_t *cmd);
304 304 static void mptsas_check_task_mgt(mptsas_t *mpt,
305 305 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
306 306 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
307 307 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
308 308 int *resid);
309 309
310 310 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
311 311 static void mptsas_free_active_slots(mptsas_t *mpt);
312 312 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
313 313 static int mptsas_start_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd);
314 314
315 315 static void mptsas_restart_hba(mptsas_t *mpt);
316 316
317 317 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
318 318 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
319 319 static inline void mptsas_doneq_add0(mptsas_t *mpt, mptsas_cmd_t *cmd);
320 320 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
321 321
322 322 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
323 323 static void mptsas_doneq_empty(mptsas_t *mpt);
324 324 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
325 325
326 326 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
327 327 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
328 328
329 329 static void mptsas_start_watch_reset_delay();
330 330 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
331 331 static void mptsas_watch_reset_delay(void *arg);
332 332 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
333 333
334 334 static int mptsas_outstanding_cmds_n(mptsas_t *mpt);
335 335 /*
336 336 * helper functions
337 337 */
338 338 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
339 339
340 340 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
341 341 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
342 342 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
343 343 int lun);
344 344 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
345 345 int lun);
346 346 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
347 347 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
348 348
349 349 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
350 350 int *lun);
351 351 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
352 352
353 353 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask,
354 354 uint8_t phy);
355 355 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask,
356 356 uint64_t wwid);
357 357 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask,
358 358 uint64_t wwid);
359 359
360 360 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
361 361 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
362 362
363 363 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
↓ open down ↓ |
363 lines elided |
↑ open up ↑ |
364 364 uint16_t *handle, mptsas_target_t **pptgt);
365 365 static void mptsas_update_phymask(mptsas_t *mpt);
366 366 static inline void mptsas_remove_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd);
367 367
368 368 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
369 369 uint32_t *status, uint8_t cmd);
370 370 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
371 371 mptsas_phymask_t *phymask);
372 372 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
373 373 mptsas_phymask_t phymask);
374 -static int mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
375 - uint32_t slotstatus);
374 +static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt);
376 375
377 376
378 377 /*
379 378 * Enumeration / DR functions
380 379 */
381 380 static void mptsas_config_all(dev_info_t *pdip);
382 381 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
383 382 dev_info_t **lundip);
384 383 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
385 384 dev_info_t **lundip);
386 385
387 386 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
388 387 static int mptsas_offline_target(dev_info_t *pdip, char *name);
389 388
390 389 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
391 390 dev_info_t **dip);
392 391
393 392 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
394 393 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
395 394 dev_info_t **dip, mptsas_target_t *ptgt);
396 395
397 396 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
398 397 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
399 398
400 399 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
401 400 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
402 401 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
403 402 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
404 403 int lun);
405 404
406 405 static void mptsas_offline_missed_luns(dev_info_t *pdip,
407 406 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
408 407 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
409 408 mdi_pathinfo_t *rpip, uint_t flags);
410 409
411 410 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
412 411 dev_info_t **smp_dip);
413 412 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
414 413 uint_t flags);
415 414
416 415 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
417 416 int mode, int *rval);
418 417 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
419 418 int mode, int *rval);
420 419 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
421 420 int mode, int *rval);
422 421 static void mptsas_record_event(void *args);
423 422 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
424 423 int mode);
425 424
426 425 static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
427 426 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
428 427 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
429 428 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
430 429 mptsas_phymask_t key2);
431 430 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
432 431 mptsas_phymask_t key2);
433 432 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
434 433
435 434 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
436 435 uint32_t, mptsas_phymask_t, uint8_t, mptsas_t *);
437 436 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
438 437 mptsas_smp_t *data);
439 438 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
440 439 mptsas_phymask_t phymask);
441 440 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t);
442 441 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
443 442 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
444 443 dev_info_t **smp_dip);
445 444
446 445 /*
447 446 * Power management functions
448 447 */
449 448 static int mptsas_get_pci_cap(mptsas_t *mpt);
450 449 static int mptsas_init_pm(mptsas_t *mpt);
451 450
452 451 /*
453 452 * MPT MSI tunable:
454 453 *
455 454 * By default MSI is enabled on all supported platforms.
456 455 */
457 456 boolean_t mptsas_enable_msi = B_TRUE;
458 457 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
459 458
460 459 static int mptsas_register_intrs(mptsas_t *);
461 460 static void mptsas_unregister_intrs(mptsas_t *);
462 461 static int mptsas_add_intrs(mptsas_t *, int);
463 462 static void mptsas_rem_intrs(mptsas_t *);
464 463
465 464 /*
466 465 * FMA Prototypes
467 466 */
468 467 static void mptsas_fm_init(mptsas_t *mpt);
469 468 static void mptsas_fm_fini(mptsas_t *mpt);
470 469 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
471 470
472 471 extern pri_t minclsyspri, maxclsyspri;
473 472
474 473 /*
475 474 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
476 475 * under this device that the paths to a physical device are created when
477 476 * MPxIO is used.
478 477 */
479 478 extern dev_info_t *scsi_vhci_dip;
480 479
481 480 /*
482 481 * Tunable timeout value for Inquiry VPD page 0x83
483 482 * By default the value is 30 seconds.
484 483 */
485 484 int mptsas_inq83_retry_timeout = 30;
486 485
/*
 * This is used to allocate memory for message frame storage, not for
 * data I/O DMA. All message frames must be stored in the first 4G of
 * physical memory, hence the 32-bit address-high limit below.
 */
ddi_dma_attr_t mptsas_dma_attrs = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,	/* address low - should be 0 (longlong) */
	0xffffffffull,	/* address high - 32-bit max range */
	0x00ffffffull,	/* count max - max DMA object size */
	4,	/* allocation alignment requirements */
	0x78,	/* burstsizes - binary encoded values */
	1,	/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,	/* granularity - device transfer size */
	0	/* flags, set to 0 */
};
506 505
/*
 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
 * physical addresses are supported.)  Unlike mptsas_dma_attrs above,
 * relaxed ordering is enabled for better I/O throughput.
 */
ddi_dma_attr_t mptsas_dma_attrs64 = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,	/* address low - should be 0 (longlong) */
	0xffffffffffffffffull,	/* address high - 64-bit max */
	0x00ffffffull,	/* count max - max DMA object size */
	4,	/* allocation alignment requirements */
	0x78,	/* burstsizes - binary encoded values */
	1,	/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,	/* granularity - device transfer size */
	DDI_DMA_RELAXED_ORDERING	/* flags, enable relaxed ordering */
};
525 524
/*
 * Device register access attributes: little-endian structure access
 * with strict ordering and default (non-fault-tolerant) error handling.
 */
ddi_device_acc_attr_t mptsas_dev_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
532 531
/*
 * Character device entry points.  open/close are delegated to the SCSI
 * HBA framework; ioctl is handled locally.  All other entry points are
 * unsupported (nodev/nochpoll).
 */
static struct cb_ops mptsas_cb_ops = {
	scsi_hba_open,	/* open */
	scsi_hba_close,	/* close */
	nodev,	/* strategy */
	nodev,	/* print */
	nodev,	/* dump */
	nodev,	/* read */
	nodev,	/* write */
	mptsas_ioctl,	/* ioctl */
	nodev,	/* devmap */
	nodev,	/* mmap */
	nodev,	/* segmap */
	nochpoll,	/* chpoll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,	/* streamtab */
	D_MP,	/* cb_flag */
	CB_REV,	/* rev */
	nodev,	/* aread */
	nodev	/* awrite */
};
553 552
/*
 * Autoconfiguration entry points.  On SPARC a legacy reset(9E) routine
 * is supplied and fast-reboot quiesce is not needed; on other platforms
 * reset is unsupported and mptsas_quiesce handles quiesce(9E).
 */
static struct dev_ops mptsas_ops = {
	DEVO_REV,	/* devo_rev, */
	0,	/* refcnt */
	ddi_no_info,	/* info */
	nulldev,	/* identify */
	nulldev,	/* probe */
	mptsas_attach,	/* attach */
	mptsas_detach,	/* detach */
#ifdef __sparc
	mptsas_reset,
#else
	nodev,	/* reset */
#endif /* __sparc */
	&mptsas_cb_ops,	/* driver operations */
	NULL,	/* bus operations */
	mptsas_power,	/* power management */
#ifdef __sparc
	ddi_quiesce_not_needed
#else
	mptsas_quiesce	/* quiesce */
#endif /* __sparc */
};
576 575
577 576
578 577 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
579 578
/*
 * Linkage description: this module is a device driver, identified to
 * the mod framework by MPTSAS_MOD_STRING and driven by mptsas_ops.
 */
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. This one is a driver */
	MPTSAS_MOD_STRING, /* Name of the module. */
	&mptsas_ops,	/* driver ops */
};
585 584
/* Module linkage passed to mod_install()/mod_remove() in _init/_fini. */
static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
589 588 #define TARGET_PROP "target"
590 589 #define LUN_PROP "lun"
591 590 #define LUN64_PROP "lun64"
592 591 #define SAS_PROP "sas-mpt"
593 592 #define MDI_GUID "wwn"
594 593 #define NDI_GUID "guid"
595 594 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
596 595
597 596 /*
598 597 * Local static data
599 598 */
600 599 #if defined(MPTSAS_DEBUG)
601 600 uint32_t mptsas_debug_flags = 0;
602 601 #endif /* defined(MPTSAS_DEBUG) */
603 602 uint32_t mptsas_debug_resets = 0;
604 603
605 604 static kmutex_t mptsas_global_mutex;
606 605 static void *mptsas_state; /* soft state ptr */
607 606 static krwlock_t mptsas_global_rwlock;
608 607
609 608 static kmutex_t mptsas_log_mutex;
610 609 static char mptsas_log_buf[256];
611 610 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
612 611
613 612 static mptsas_t *mptsas_head, *mptsas_tail;
614 613 static clock_t mptsas_scsi_watchdog_tick;
615 614 static clock_t mptsas_tick;
616 615 static timeout_id_t mptsas_reset_watch;
617 616 static timeout_id_t mptsas_timeout_id;
618 617 static int mptsas_timeouts_enabled = 0;
619 618 /*
620 619 * warlock directives
621 620 */
622 621 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
623 622 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
624 623 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
625 624 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
626 625 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
627 626 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
628 627
629 628 /*
630 629 * SM - HBA statics
631 630 */
632 631 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
633 632
634 633 #ifdef MPTSAS_DEBUG
635 634 void debug_enter(char *);
636 635 #endif
637 636
638 637 /*
639 638 * Notes:
640 639 * - scsi_hba_init(9F) initializes SCSI HBA modules
641 640 * - must call scsi_hba_fini(9F) if modload() fails
642 641 */
643 642 int
644 643 _init(void)
645 644 {
646 645 int status;
647 646 /* CONSTCOND */
648 647 ASSERT(NO_COMPETING_THREADS);
649 648
650 649 NDBG0(("_init"));
651 650
652 651 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
653 652 MPTSAS_INITIAL_SOFT_SPACE);
654 653 if (status != 0) {
655 654 return (status);
656 655 }
657 656
658 657 if ((status = scsi_hba_init(&modlinkage)) != 0) {
659 658 ddi_soft_state_fini(&mptsas_state);
660 659 return (status);
661 660 }
662 661
663 662 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
664 663 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
665 664 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
666 665
667 666 if ((status = mod_install(&modlinkage)) != 0) {
668 667 mutex_destroy(&mptsas_log_mutex);
669 668 rw_destroy(&mptsas_global_rwlock);
670 669 mutex_destroy(&mptsas_global_mutex);
671 670 ddi_soft_state_fini(&mptsas_state);
672 671 scsi_hba_fini(&modlinkage);
673 672 }
674 673
675 674 return (status);
676 675 }
677 676
678 677 /*
679 678 * Notes:
680 679 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
681 680 */
682 681 int
683 682 _fini(void)
684 683 {
685 684 int status;
686 685 /* CONSTCOND */
687 686 ASSERT(NO_COMPETING_THREADS);
688 687
689 688 NDBG0(("_fini"));
690 689
691 690 if ((status = mod_remove(&modlinkage)) == 0) {
692 691 ddi_soft_state_fini(&mptsas_state);
693 692 scsi_hba_fini(&modlinkage);
694 693 mutex_destroy(&mptsas_global_mutex);
695 694 rw_destroy(&mptsas_global_rwlock);
696 695 mutex_destroy(&mptsas_log_mutex);
697 696 }
698 697 return (status);
699 698 }
700 699
/*
 * The loadable-module _info(9E) entry point: reports module information
 * to the mod framework via mod_info(9F).
 */
int
_info(struct modinfo *modinfop)
{
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);
	NDBG0(("mptsas _info"));

	return (mod_info(&modlinkage, modinfop));
}
713 712
714 713
715 714 static int
716 715 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
717 716 {
718 717 dev_info_t *pdip;
719 718 mptsas_t *mpt;
720 719 scsi_hba_tran_t *hba_tran;
721 720 char *iport = NULL;
722 721 char phymask[MPTSAS_MAX_PHYS];
723 722 mptsas_phymask_t phy_mask = 0;
724 723 int dynamic_port = 0;
725 724 uint32_t page_address;
726 725 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
727 726 int rval = DDI_FAILURE;
728 727 int i = 0;
729 728 uint8_t numphys = 0;
730 729 uint8_t phy_id;
731 730 uint8_t phy_port = 0;
732 731 uint16_t attached_devhdl = 0;
733 732 uint32_t dev_info;
734 733 uint64_t attached_sas_wwn;
735 734 uint16_t dev_hdl;
736 735 uint16_t pdev_hdl;
737 736 uint16_t bay_num, enclosure;
738 737 char attached_wwnstr[MPTSAS_WWN_STRLEN];
739 738
740 739 /* CONSTCOND */
741 740 ASSERT(NO_COMPETING_THREADS);
742 741
743 742 switch (cmd) {
744 743 case DDI_ATTACH:
745 744 break;
746 745
747 746 case DDI_RESUME:
748 747 /*
749 748 * If this a scsi-iport node, nothing to do here.
750 749 */
751 750 return (DDI_SUCCESS);
752 751
753 752 default:
754 753 return (DDI_FAILURE);
755 754 }
756 755
757 756 pdip = ddi_get_parent(dip);
758 757
759 758 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
760 759 NULL) {
761 760 cmn_err(CE_WARN, "Failed attach iport because fail to "
762 761 "get tran vector for the HBA node");
763 762 return (DDI_FAILURE);
764 763 }
765 764
766 765 mpt = TRAN2MPT(hba_tran);
767 766 ASSERT(mpt != NULL);
768 767 if (mpt == NULL)
769 768 return (DDI_FAILURE);
770 769
771 770 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
772 771 NULL) {
773 772 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
774 773 "get tran vector for the iport node");
775 774 return (DDI_FAILURE);
776 775 }
777 776
778 777 /*
779 778 * Overwrite parent's tran_hba_private to iport's tran vector
780 779 */
781 780 hba_tran->tran_hba_private = mpt;
782 781
783 782 ddi_report_dev(dip);
784 783
785 784 /*
786 785 * Get SAS address for initiator port according dev_handle
787 786 */
788 787 iport = ddi_get_name_addr(dip);
789 788 if (iport && strncmp(iport, "v0", 2) == 0) {
790 789 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
791 790 MPTSAS_VIRTUAL_PORT, 1) !=
792 791 DDI_PROP_SUCCESS) {
793 792 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
794 793 MPTSAS_VIRTUAL_PORT);
795 794 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
796 795 "prop update failed");
797 796 return (DDI_FAILURE);
798 797 }
799 798 return (DDI_SUCCESS);
800 799 }
801 800
802 801 mutex_enter(&mpt->m_mutex);
803 802 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
804 803 bzero(phymask, sizeof (phymask));
805 804 (void) sprintf(phymask,
806 805 "%x", mpt->m_phy_info[i].phy_mask);
807 806 if (strcmp(phymask, iport) == 0) {
808 807 break;
809 808 }
810 809 }
811 810
812 811 if (i == MPTSAS_MAX_PHYS) {
813 812 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
814 813 "seems not exist", iport);
815 814 mutex_exit(&mpt->m_mutex);
816 815 return (DDI_FAILURE);
817 816 }
818 817
819 818 phy_mask = mpt->m_phy_info[i].phy_mask;
820 819
821 820 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
822 821 dynamic_port = 1;
823 822 else
824 823 dynamic_port = 0;
825 824
826 825 /*
827 826 * Update PHY info for smhba
828 827 */
829 828 if (mptsas_smhba_phy_init(mpt)) {
830 829 mutex_exit(&mpt->m_mutex);
831 830 mptsas_log(mpt, CE_WARN, "mptsas phy update "
832 831 "failed");
833 832 return (DDI_FAILURE);
834 833 }
835 834
836 835 mutex_exit(&mpt->m_mutex);
837 836
838 837 numphys = 0;
839 838 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
840 839 if ((phy_mask >> i) & 0x01) {
841 840 numphys++;
842 841 }
843 842 }
844 843
845 844 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
846 845 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
847 846 mpt->un.m_base_wwid);
848 847
849 848 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
850 849 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
851 850 DDI_PROP_SUCCESS) {
852 851 (void) ddi_prop_remove(DDI_DEV_T_NONE,
853 852 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
854 853 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
855 854 "prop update failed");
856 855 return (DDI_FAILURE);
857 856 }
858 857 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
859 858 MPTSAS_NUM_PHYS, numphys) !=
860 859 DDI_PROP_SUCCESS) {
861 860 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
862 861 return (DDI_FAILURE);
863 862 }
864 863
865 864 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
866 865 "phymask", phy_mask) !=
867 866 DDI_PROP_SUCCESS) {
868 867 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
869 868 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
870 869 "prop update failed");
871 870 return (DDI_FAILURE);
872 871 }
873 872
874 873 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
875 874 "dynamic-port", dynamic_port) !=
876 875 DDI_PROP_SUCCESS) {
877 876 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
878 877 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
879 878 "prop update failed");
880 879 return (DDI_FAILURE);
881 880 }
882 881 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
883 882 MPTSAS_VIRTUAL_PORT, 0) !=
884 883 DDI_PROP_SUCCESS) {
885 884 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
886 885 MPTSAS_VIRTUAL_PORT);
887 886 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
888 887 "prop update failed");
889 888 return (DDI_FAILURE);
890 889 }
891 890 mptsas_smhba_set_phy_props(mpt,
892 891 iport, dip, numphys, &attached_devhdl);
893 892
894 893 mutex_enter(&mpt->m_mutex);
895 894 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
896 895 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
897 896 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
898 897 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
899 898 &pdev_hdl, &bay_num, &enclosure);
900 899 if (rval != DDI_SUCCESS) {
901 900 mptsas_log(mpt, CE_WARN,
902 901 "Failed to get device page0 for handle:%d",
903 902 attached_devhdl);
904 903 mutex_exit(&mpt->m_mutex);
905 904 return (DDI_FAILURE);
906 905 }
907 906
908 907 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
909 908 bzero(phymask, sizeof (phymask));
910 909 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
911 910 if (strcmp(phymask, iport) == 0) {
912 911 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
913 912 "%x",
914 913 mpt->m_phy_info[i].phy_mask);
915 914 }
916 915 }
917 916 mutex_exit(&mpt->m_mutex);
918 917
919 918 bzero(attached_wwnstr, sizeof (attached_wwnstr));
920 919 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
921 920 attached_sas_wwn);
922 921 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
923 922 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
924 923 DDI_PROP_SUCCESS) {
925 924 (void) ddi_prop_remove(DDI_DEV_T_NONE,
926 925 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
927 926 return (DDI_FAILURE);
928 927 }
929 928
930 929 /* Create kstats for each phy on this iport */
931 930
932 931 mptsas_create_phy_stats(mpt, iport, dip);
933 932
934 933 /*
935 934 * register sas hba iport with mdi (MPxIO/vhci)
936 935 */
937 936 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
938 937 dip, 0) == MDI_SUCCESS) {
939 938 mpt->m_mpxio_enable = TRUE;
940 939 }
941 940 return (DDI_SUCCESS);
942 941 }
943 942
944 943 /*
945 944 * Notes:
946 945 * Set up all device state and allocate data structures,
947 946 * mutexes, condition variables, etc. for device operation.
948 947 * Add interrupts needed.
949 948 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
950 949 */
951 950 static int
952 951 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
953 952 {
954 953 mptsas_t *mpt = NULL;
955 954 int instance, i, j;
956 955 int doneq_thread_num;
957 956 char intr_added = 0;
958 957 char map_setup = 0;
959 958 char config_setup = 0;
960 959 char hba_attach_setup = 0;
961 960 char smp_attach_setup = 0;
962 961 char mutex_init_done = 0;
963 962 char event_taskq_create = 0;
964 963 char dr_taskq_create = 0;
965 964 char doneq_thread_create = 0;
966 965 scsi_hba_tran_t *hba_tran;
967 966 uint_t mem_bar = MEM_SPACE;
968 967 int rval = DDI_FAILURE;
969 968
970 969 /* CONSTCOND */
971 970 ASSERT(NO_COMPETING_THREADS);
972 971
973 972 if (scsi_hba_iport_unit_address(dip)) {
974 973 return (mptsas_iport_attach(dip, cmd));
975 974 }
976 975
977 976 switch (cmd) {
978 977 case DDI_ATTACH:
979 978 break;
980 979
981 980 case DDI_RESUME:
982 981 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
983 982 return (DDI_FAILURE);
984 983
985 984 mpt = TRAN2MPT(hba_tran);
986 985
987 986 if (!mpt) {
988 987 return (DDI_FAILURE);
989 988 }
990 989
991 990 /*
992 991 * Reset hardware and softc to "no outstanding commands"
993 992 * Note that a check condition can result on first command
994 993 * to a target.
995 994 */
996 995 mutex_enter(&mpt->m_mutex);
997 996
998 997 /*
999 998 * raise power.
1000 999 */
1001 1000 if (mpt->m_options & MPTSAS_OPT_PM) {
1002 1001 mutex_exit(&mpt->m_mutex);
1003 1002 (void) pm_busy_component(dip, 0);
1004 1003 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
1005 1004 if (rval == DDI_SUCCESS) {
1006 1005 mutex_enter(&mpt->m_mutex);
1007 1006 } else {
1008 1007 /*
1009 1008 * The pm_raise_power() call above failed,
1010 1009 * and that can only occur if we were unable
1011 1010 * to reset the hardware. This is probably
1012 1011 * due to unhealty hardware, and because
1013 1012 * important filesystems(such as the root
1014 1013 * filesystem) could be on the attached disks,
1015 1014 * it would not be a good idea to continue,
1016 1015 * as we won't be entirely certain we are
1017 1016 * writing correct data. So we panic() here
1018 1017 * to not only prevent possible data corruption,
1019 1018 * but to give developers or end users a hope
1020 1019 * of identifying and correcting any problems.
1021 1020 */
1022 1021 fm_panic("mptsas could not reset hardware "
1023 1022 "during resume");
1024 1023 }
1025 1024 }
1026 1025
1027 1026 mpt->m_suspended = 0;
1028 1027
1029 1028 /*
1030 1029 * Reinitialize ioc
1031 1030 */
1032 1031 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1033 1032 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1034 1033 mutex_exit(&mpt->m_mutex);
1035 1034 if (mpt->m_options & MPTSAS_OPT_PM) {
1036 1035 (void) pm_idle_component(dip, 0);
1037 1036 }
1038 1037 fm_panic("mptsas init chip fail during resume");
1039 1038 }
1040 1039 /*
1041 1040 * mptsas_update_driver_data needs interrupts so enable them
1042 1041 * first.
1043 1042 */
1044 1043 MPTSAS_ENABLE_INTR(mpt);
1045 1044 mptsas_update_driver_data(mpt);
1046 1045
1047 1046 /* start requests, if possible */
1048 1047 mptsas_restart_hba(mpt);
1049 1048
1050 1049 mutex_exit(&mpt->m_mutex);
1051 1050
1052 1051 /*
1053 1052 * Restart watch thread
1054 1053 */
1055 1054 mutex_enter(&mptsas_global_mutex);
1056 1055 if (mptsas_timeout_id == 0) {
1057 1056 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1058 1057 mptsas_tick);
1059 1058 mptsas_timeouts_enabled = 1;
1060 1059 }
1061 1060 mutex_exit(&mptsas_global_mutex);
1062 1061
1063 1062 /* report idle status to pm framework */
1064 1063 if (mpt->m_options & MPTSAS_OPT_PM) {
1065 1064 (void) pm_idle_component(dip, 0);
1066 1065 }
1067 1066
1068 1067 return (DDI_SUCCESS);
1069 1068
1070 1069 default:
1071 1070 return (DDI_FAILURE);
1072 1071
1073 1072 }
1074 1073
1075 1074 instance = ddi_get_instance(dip);
1076 1075
1077 1076 /*
1078 1077 * Allocate softc information.
1079 1078 */
1080 1079 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1081 1080 mptsas_log(NULL, CE_WARN,
1082 1081 "mptsas%d: cannot allocate soft state", instance);
1083 1082 goto fail;
1084 1083 }
1085 1084
1086 1085 mpt = ddi_get_soft_state(mptsas_state, instance);
1087 1086
1088 1087 if (mpt == NULL) {
1089 1088 mptsas_log(NULL, CE_WARN,
1090 1089 "mptsas%d: cannot get soft state", instance);
1091 1090 goto fail;
1092 1091 }
1093 1092
1094 1093 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1095 1094 scsi_size_clean(dip);
1096 1095
1097 1096 mpt->m_dip = dip;
1098 1097 mpt->m_instance = instance;
1099 1098
1100 1099 /* Make a per-instance copy of the structures */
1101 1100 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1102 1101 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1103 1102 mpt->m_reg_acc_attr = mptsas_dev_attr;
1104 1103 mpt->m_dev_acc_attr = mptsas_dev_attr;
1105 1104
1106 1105 /*
1107 1106 * Initialize FMA
1108 1107 */
1109 1108 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1110 1109 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1111 1110 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1112 1111 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1113 1112
1114 1113 mptsas_fm_init(mpt);
1115 1114
1116 1115 if (mptsas_alloc_handshake_msg(mpt,
1117 1116 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1118 1117 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1119 1118 goto fail;
1120 1119 }
1121 1120
1122 1121 /*
1123 1122 * Setup configuration space
1124 1123 */
1125 1124 if (mptsas_config_space_init(mpt) == FALSE) {
1126 1125 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1127 1126 goto fail;
1128 1127 }
1129 1128 config_setup++;
1130 1129
1131 1130 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1132 1131 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1133 1132 mptsas_log(mpt, CE_WARN, "map setup failed");
1134 1133 goto fail;
1135 1134 }
1136 1135 map_setup++;
1137 1136
1138 1137 /*
1139 1138 * A taskq is created for dealing with the event handler
1140 1139 */
1141 1140 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1142 1141 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1143 1142 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1144 1143 goto fail;
1145 1144 }
1146 1145 event_taskq_create++;
1147 1146
1148 1147 /*
1149 1148 * A taskq is created for dealing with dr events
1150 1149 */
1151 1150 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1152 1151 "mptsas_dr_taskq",
1153 1152 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1154 1153 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1155 1154 "failed");
1156 1155 goto fail;
1157 1156 }
1158 1157 dr_taskq_create++;
1159 1158
1160 1159 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1161 1160 0, "mptsas_doneq_thread_threshold_prop", 10);
1162 1161 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1163 1162 0, "mptsas_doneq_length_threshold_prop", 8);
1164 1163 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1165 1164 0, "mptsas_doneq_thread_n_prop", 8);
1166 1165
1167 1166 if (mpt->m_doneq_thread_n) {
1168 1167 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1169 1168 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1170 1169
1171 1170 mutex_enter(&mpt->m_doneq_mutex);
1172 1171 mpt->m_doneq_thread_id =
1173 1172 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1174 1173 * mpt->m_doneq_thread_n, KM_SLEEP);
1175 1174
1176 1175 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1177 1176 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1178 1177 CV_DRIVER, NULL);
1179 1178 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1180 1179 MUTEX_DRIVER, NULL);
1181 1180 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1182 1181 mpt->m_doneq_thread_id[j].flag |=
1183 1182 MPTSAS_DONEQ_THREAD_ACTIVE;
1184 1183 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1185 1184 mpt->m_doneq_thread_id[j].arg.t = j;
1186 1185 mpt->m_doneq_thread_id[j].threadp =
1187 1186 thread_create(NULL, 0, mptsas_doneq_thread,
1188 1187 &mpt->m_doneq_thread_id[j].arg,
1189 1188 0, &p0, TS_RUN, minclsyspri);
1190 1189 mpt->m_doneq_thread_id[j].donetail =
1191 1190 &mpt->m_doneq_thread_id[j].doneq;
1192 1191 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1193 1192 }
1194 1193 mutex_exit(&mpt->m_doneq_mutex);
1195 1194 doneq_thread_create++;
1196 1195 }
1197 1196
1198 1197 /* Initialize mutex used in interrupt handler */
1199 1198 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1200 1199 DDI_INTR_PRI(mpt->m_intr_pri));
1201 1200 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1202 1201 mutex_init(&mpt->m_intr_mutex, NULL, MUTEX_DRIVER,
1203 1202 DDI_INTR_PRI(mpt->m_intr_pri));
1204 1203 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1205 1204 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1206 1205 NULL, MUTEX_DRIVER,
1207 1206 DDI_INTR_PRI(mpt->m_intr_pri));
1208 1207 }
1209 1208
1210 1209 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1211 1210 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1212 1211 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1213 1212 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1214 1213 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1215 1214 mutex_init_done++;
1216 1215
1217 1216 /*
1218 1217 * Disable hardware interrupt since we're not ready to
1219 1218 * handle it yet.
1220 1219 */
1221 1220 MPTSAS_DISABLE_INTR(mpt);
1222 1221 if (mptsas_register_intrs(mpt) == FALSE)
1223 1222 goto fail;
1224 1223 intr_added++;
1225 1224
1226 1225 mutex_enter(&mpt->m_mutex);
1227 1226 /*
1228 1227 * Initialize power management component
1229 1228 */
1230 1229 if (mpt->m_options & MPTSAS_OPT_PM) {
1231 1230 if (mptsas_init_pm(mpt)) {
1232 1231 mutex_exit(&mpt->m_mutex);
1233 1232 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1234 1233 "failed");
1235 1234 goto fail;
1236 1235 }
1237 1236 }
1238 1237
1239 1238 /*
1240 1239 * Initialize chip using Message Unit Reset, if allowed
1241 1240 */
1242 1241 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1243 1242 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1244 1243 mutex_exit(&mpt->m_mutex);
1245 1244 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1246 1245 goto fail;
1247 1246 }
1248 1247
1249 1248 /*
1250 1249 * Fill in the phy_info structure and get the base WWID
1251 1250 */
1252 1251 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1253 1252 mptsas_log(mpt, CE_WARN,
1254 1253 "mptsas_get_manufacture_page5 failed!");
1255 1254 goto fail;
1256 1255 }
1257 1256
1258 1257 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1259 1258 mptsas_log(mpt, CE_WARN,
1260 1259 "mptsas_get_sas_io_unit_page_hndshk failed!");
1261 1260 goto fail;
1262 1261 }
1263 1262
1264 1263 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1265 1264 mptsas_log(mpt, CE_WARN,
1266 1265 "mptsas_get_manufacture_page0 failed!");
1267 1266 goto fail;
1268 1267 }
1269 1268
1270 1269 mutex_exit(&mpt->m_mutex);
1271 1270
1272 1271 /*
1273 1272 * Register the iport for multiple port HBA
1274 1273 */
1275 1274 mptsas_iport_register(mpt);
1276 1275
1277 1276 /*
1278 1277 * initialize SCSI HBA transport structure
1279 1278 */
1280 1279 if (mptsas_hba_setup(mpt) == FALSE)
1281 1280 goto fail;
1282 1281 hba_attach_setup++;
1283 1282
1284 1283 if (mptsas_smp_setup(mpt) == FALSE)
1285 1284 goto fail;
1286 1285 smp_attach_setup++;
1287 1286
1288 1287 if (mptsas_cache_create(mpt) == FALSE)
1289 1288 goto fail;
1290 1289
1291 1290 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1292 1291 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1293 1292 if (mpt->m_scsi_reset_delay == 0) {
1294 1293 mptsas_log(mpt, CE_NOTE,
1295 1294 "scsi_reset_delay of 0 is not recommended,"
1296 1295 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1297 1296 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1298 1297 }
1299 1298
1300 1299 /*
1301 1300 * Initialize the wait and done FIFO queue
1302 1301 */
1303 1302 mpt->m_donetail = &mpt->m_doneq;
1304 1303 mpt->m_waitqtail = &mpt->m_waitq;
1305 1304
1306 1305 /*
1307 1306 * ioc cmd queue initialize
1308 1307 */
1309 1308 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1310 1309 mpt->m_dev_handle = 0xFFFF;
1311 1310
1312 1311 MPTSAS_ENABLE_INTR(mpt);
1313 1312
1314 1313 /*
1315 1314 * enable event notification
1316 1315 */
1317 1316 mutex_enter(&mpt->m_mutex);
1318 1317 if (mptsas_ioc_enable_event_notification(mpt)) {
1319 1318 mutex_exit(&mpt->m_mutex);
1320 1319 goto fail;
1321 1320 }
1322 1321 mutex_exit(&mpt->m_mutex);
1323 1322
1324 1323 /*
1325 1324 * Initialize PHY info for smhba
1326 1325 */
1327 1326 if (mptsas_smhba_setup(mpt)) {
1328 1327 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1329 1328 "failed");
1330 1329 goto fail;
1331 1330 }
1332 1331
1333 1332 /* Check all dma handles allocated in attach */
1334 1333 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1335 1334 != DDI_SUCCESS) ||
1336 1335 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1337 1336 != DDI_SUCCESS) ||
1338 1337 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1339 1338 != DDI_SUCCESS) ||
1340 1339 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1341 1340 != DDI_SUCCESS) ||
1342 1341 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1343 1342 != DDI_SUCCESS)) {
1344 1343 goto fail;
1345 1344 }
1346 1345
1347 1346 /* Check all acc handles allocated in attach */
1348 1347 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1349 1348 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1350 1349 != DDI_SUCCESS) ||
1351 1350 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1352 1351 != DDI_SUCCESS) ||
1353 1352 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1354 1353 != DDI_SUCCESS) ||
1355 1354 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1356 1355 != DDI_SUCCESS) ||
1357 1356 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1358 1357 != DDI_SUCCESS) ||
1359 1358 (mptsas_check_acc_handle(mpt->m_config_handle)
1360 1359 != DDI_SUCCESS)) {
1361 1360 goto fail;
1362 1361 }
1363 1362
1364 1363 /*
1365 1364 * After this point, we are not going to fail the attach.
1366 1365 */
1367 1366 /*
1368 1367 * used for mptsas_watch
1369 1368 */
1370 1369 mptsas_list_add(mpt);
1371 1370
1372 1371 mutex_enter(&mptsas_global_mutex);
1373 1372 if (mptsas_timeouts_enabled == 0) {
1374 1373 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1375 1374 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1376 1375
1377 1376 mptsas_tick = mptsas_scsi_watchdog_tick *
1378 1377 drv_usectohz((clock_t)1000000);
1379 1378
1380 1379 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1381 1380 mptsas_timeouts_enabled = 1;
1382 1381 }
1383 1382 mutex_exit(&mptsas_global_mutex);
1384 1383
1385 1384 /* Print message of HBA present */
1386 1385 ddi_report_dev(dip);
1387 1386
1388 1387 /* report idle status to pm framework */
1389 1388 if (mpt->m_options & MPTSAS_OPT_PM) {
1390 1389 (void) pm_idle_component(dip, 0);
1391 1390 }
1392 1391
1393 1392 return (DDI_SUCCESS);
1394 1393
1395 1394 fail:
1396 1395 mptsas_log(mpt, CE_WARN, "attach failed");
1397 1396 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1398 1397 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1399 1398 if (mpt) {
1400 1399 mutex_enter(&mptsas_global_mutex);
1401 1400
1402 1401 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1403 1402 timeout_id_t tid = mptsas_timeout_id;
1404 1403 mptsas_timeouts_enabled = 0;
1405 1404 mptsas_timeout_id = 0;
1406 1405 mutex_exit(&mptsas_global_mutex);
1407 1406 (void) untimeout(tid);
1408 1407 mutex_enter(&mptsas_global_mutex);
1409 1408 }
1410 1409 mutex_exit(&mptsas_global_mutex);
1411 1410 /* deallocate in reverse order */
1412 1411 mptsas_cache_destroy(mpt);
1413 1412
1414 1413 if (smp_attach_setup) {
1415 1414 mptsas_smp_teardown(mpt);
1416 1415 }
1417 1416 if (hba_attach_setup) {
1418 1417 mptsas_hba_teardown(mpt);
1419 1418 }
1420 1419
1421 1420 if (mpt->m_active) {
1422 1421 mptsas_hash_uninit(&mpt->m_active->m_smptbl,
1423 1422 sizeof (mptsas_smp_t));
1424 1423 mptsas_hash_uninit(&mpt->m_active->m_tgttbl,
1425 1424 sizeof (mptsas_target_t));
1426 1425 mptsas_free_active_slots(mpt);
1427 1426 }
1428 1427 if (intr_added) {
1429 1428 mptsas_unregister_intrs(mpt);
1430 1429 }
1431 1430
1432 1431 if (doneq_thread_create) {
1433 1432 mutex_enter(&mpt->m_doneq_mutex);
1434 1433 doneq_thread_num = mpt->m_doneq_thread_n;
1435 1434 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1436 1435 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1437 1436 mpt->m_doneq_thread_id[j].flag &=
1438 1437 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1439 1438 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1440 1439 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1441 1440 }
1442 1441 while (mpt->m_doneq_thread_n) {
1443 1442 cv_wait(&mpt->m_doneq_thread_cv,
1444 1443 &mpt->m_doneq_mutex);
1445 1444 }
1446 1445 for (j = 0; j < doneq_thread_num; j++) {
1447 1446 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1448 1447 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1449 1448 }
1450 1449 kmem_free(mpt->m_doneq_thread_id,
1451 1450 sizeof (mptsas_doneq_thread_list_t)
1452 1451 * doneq_thread_num);
1453 1452 mutex_exit(&mpt->m_doneq_mutex);
1454 1453 cv_destroy(&mpt->m_doneq_thread_cv);
1455 1454 mutex_destroy(&mpt->m_doneq_mutex);
1456 1455 }
1457 1456 if (event_taskq_create) {
1458 1457 ddi_taskq_destroy(mpt->m_event_taskq);
1459 1458 }
1460 1459 if (dr_taskq_create) {
1461 1460 ddi_taskq_destroy(mpt->m_dr_taskq);
1462 1461 }
1463 1462 if (mutex_init_done) {
1464 1463 mutex_destroy(&mpt->m_intr_mutex);
1465 1464 mutex_destroy(&mpt->m_passthru_mutex);
1466 1465 mutex_destroy(&mpt->m_mutex);
1467 1466 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1468 1467 mutex_destroy(
1469 1468 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1470 1469 }
1471 1470 cv_destroy(&mpt->m_cv);
1472 1471 cv_destroy(&mpt->m_passthru_cv);
1473 1472 cv_destroy(&mpt->m_fw_cv);
1474 1473 cv_destroy(&mpt->m_config_cv);
1475 1474 cv_destroy(&mpt->m_fw_diag_cv);
1476 1475 }
1477 1476
1478 1477 if (map_setup) {
1479 1478 mptsas_cfg_fini(mpt);
1480 1479 }
1481 1480 if (config_setup) {
1482 1481 mptsas_config_space_fini(mpt);
1483 1482 }
1484 1483 mptsas_free_handshake_msg(mpt);
1485 1484 mptsas_hba_fini(mpt);
1486 1485
1487 1486 mptsas_fm_fini(mpt);
1488 1487 ddi_soft_state_free(mptsas_state, instance);
1489 1488 ddi_prop_remove_all(dip);
1490 1489 }
1491 1490 return (DDI_FAILURE);
1492 1491 }
1493 1492
/*
 * DDI_SUSPEND handling for the HBA node.
 *
 * Cancels this instance's timeout threads, cancels the global watch
 * timeouts if every mpt instance is now suspended, sends a RAID action
 * system shutdown to sync IR (only if the device is at full power),
 * and drains the event/dr taskqs.  Iport nodes and nodes without a
 * tran vector return DDI_SUCCESS immediately.  Always returns
 * DDI_SUCCESS.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* Nested suspend: just bump the count and return. */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	/*
	 * untimeout(9F) can block on a running callback that may itself
	 * need m_mutex, so the lock is dropped around each cancellation
	 * and re-taken afterwards.
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	/* g == NULL after the loop means no instance is still active. */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		timeout_id_t	tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1600 1599
#ifdef __sparc
/*
 * reset(9E) entry point (SPARC only): quiesce the HBA on system reset
 * by disabling interrupts and syncing IR via a RAID action system
 * shutdown.  Always returns DDI_SUCCESS.
 */
/*ARGSUSED*/
static int
mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t *tran;

	/*
	 * If this call is for iport, just return.
	 */
	if (scsi_hba_iport_unit_address(devi))
		return (DDI_SUCCESS);

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	if ((mpt = TRAN2MPT(tran)) == NULL)
		return (DDI_SUCCESS);

	/*
	 * Send RAID action system shutdown to sync IR.  Disable HBA
	 * interrupts in hardware first.
	 */
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);

	return (DDI_SUCCESS);
}
1630 1629 #else /* __sparc */
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
mptsas_quiesce(dev_info_t *devi)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t *tran;

	/*
	 * If this call is for iport, just return.
	 */
	if (scsi_hba_iport_unit_address(devi))
		return (DDI_SUCCESS);

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	if ((mpt = TRAN2MPT(tran)) == NULL)
		return (DDI_SUCCESS);

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/* Send RAID action system shutdown to sync IR */
	mptsas_raid_action_system_shutdown(mpt);

	return (DDI_SUCCESS);
}
1666 1665 #endif /* __sparc */
1667 1666
1668 1667 /*
1669 1668 * detach(9E). Remove all device allocations and system resources;
1670 1669 * disable device interrupts.
1671 1670 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1672 1671 */
1673 1672 static int
1674 1673 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1675 1674 {
1676 1675 /* CONSTCOND */
1677 1676 ASSERT(NO_COMPETING_THREADS);
1678 1677 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1679 1678
1680 1679 switch (cmd) {
1681 1680 case DDI_DETACH:
1682 1681 return (mptsas_do_detach(devi));
1683 1682
1684 1683 case DDI_SUSPEND:
1685 1684 return (mptsas_suspend(devi));
1686 1685
1687 1686 default:
1688 1687 return (DDI_FAILURE);
1689 1688 }
1690 1689 /* NOTREACHED */
1691 1690 }
1692 1691
/*
 * Tear down a detaching devinfo node.  For an iport node this only
 * frees any remaining MPxIO path info (failing if a path cannot be
 * freed) and unregisters the pHCI.  For the HBA node the sequence is:
 * raise power (if PM-managed), quiesce/reset the IOC, remove
 * interrupts and taskqs, stop the doneq worker threads, cancel
 * per-instance and global timeouts, free target/SMP tables, kmem
 * caches, DMA pools, locks and CVs, and finally release config space
 * and the soft state.  Ordering matters throughout; do not reorder.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* Free every client path; abort if one is busy. */
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR. After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/*
	 * Signal all doneq worker threads to exit, wait until the count
	 * drains to zero, then destroy their CVs/mutexes and the array.
	 */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		/* Drop m_mutex around untimeout() to avoid deadlock. */
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_hash_uninit(&mpt->m_active->m_tgttbl, sizeof (mptsas_target_t));
	mptsas_hash_uninit(&mpt->m_active->m_smptbl, sizeof (mptsas_smp_t));
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_intr_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
1908 1907
/*
 * Append an HBA soft state to the global instance list
 * (mptsas_head/mptsas_tail), holding the global rwlock as writer.
 */
static void
mptsas_list_add(mptsas_t *mpt)
{
	rw_enter(&mptsas_global_rwlock, RW_WRITER);

	if (mptsas_head == NULL) {
		mptsas_head = mpt;
	} else {
		mptsas_tail->m_next = mpt;
	}
	mptsas_tail = mpt;
	rw_exit(&mptsas_global_rwlock);
}
1922 1921
/*
 * Unlink an HBA soft state from the global instance list.  Panics
 * (CE_PANIC) if the instance is not on the list, since that implies
 * soft-state corruption.
 */
static void
mptsas_list_del(mptsas_t *mpt)
{
	mptsas_t *m;
	/*
	 * Remove device instance from the global linked list
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	if (mptsas_head == mpt) {
		m = mptsas_head = mpt->m_next;
	} else {
		for (m = mptsas_head; m != NULL; m = m->m_next) {
			if (m->m_next == mpt) {
				m->m_next = mpt->m_next;
				break;
			}
		}
		if (m == NULL) {
			mptsas_log(mpt, CE_PANIC, "Not in softc list!");
		}
	}

	/* If the tail was removed, its predecessor (m) becomes the tail. */
	if (mptsas_tail == mpt) {
		mptsas_tail = m;
	}
	rw_exit(&mptsas_global_rwlock);
}
1950 1949
/*
 * Allocate the single-cookie DMA buffer used for handshake (doorbell)
 * messages such as task management requests.  Handles and the buffer
 * size are recorded in the soft state for mptsas_free_handshake_msg().
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
{
	ddi_dma_attr_t task_dma_attrs;

	task_dma_attrs = mpt->m_msg_dma_attr;
	task_dma_attrs.dma_attr_sgllen = 1;
	task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);

	/* allocate Task Management ddi_dma resources */
	if (mptsas_dma_addr_create(mpt, task_dma_attrs,
	    &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
	    alloc_size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}
	mpt->m_hshk_dma_size = alloc_size;

	return (DDI_SUCCESS);
}
1970 1969
/*
 * Free the handshake message DMA buffer allocated by
 * mptsas_alloc_handshake_msg() and clear its recorded size.
 */
static void
mptsas_free_handshake_msg(mptsas_t *mpt)
{
	mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
	mpt->m_hshk_dma_size = 0;
}
1977 1976
/*
 * Allocate and populate the SCSA transport vector for this HBA and
 * attach it via scsi_hba_attach_setup().  Entry points that the driver
 * does not implement (bus reset, event calls) are explicitly NULL.
 * Returns TRUE on success; on failure the tran is freed, m_tran is
 * cleared and FALSE is returned.
 */
static int
mptsas_hba_setup(mptsas_t *mpt)
{
	scsi_hba_tran_t		*hba_tran;
	int			tran_flags;

	/* Allocate a transport structure */
	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
	    SCSI_HBA_CANSLEEP);
	ASSERT(mpt->m_tran != NULL);

	hba_tran->tran_hba_private	= mpt;
	hba_tran->tran_tgt_private	= NULL;

	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;

	hba_tran->tran_start		= mptsas_scsi_start;
	hba_tran->tran_reset		= mptsas_scsi_reset;
	hba_tran->tran_abort		= mptsas_scsi_abort;
	hba_tran->tran_getcap		= mptsas_scsi_getcap;
	hba_tran->tran_setcap		= mptsas_scsi_setcap;
	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;

	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;

	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
	hba_tran->tran_get_name		= mptsas_get_name;

	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
	hba_tran->tran_bus_reset	= NULL;

	hba_tran->tran_add_eventcall	= NULL;
	hba_tran->tran_get_eventcookie	= NULL;
	hba_tran->tran_post_event	= NULL;
	hba_tran->tran_remove_eventcall	= NULL;

	hba_tran->tran_bus_config	= mptsas_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	/*
	 * All children of the HBA are iports. We need tran was cloned.
	 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
	 * inherited to iport's tran vector.
	 */
	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);

	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
	    hba_tran, tran_flags) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
		scsi_hba_tran_free(hba_tran);
		mpt->m_tran = NULL;
		return (FALSE);
	}
	return (TRUE);
}
2039 2038
/*
 * Undo mptsas_hba_setup(): detach the SCSA HBA framework node and
 * free the transport vector if one was allocated.
 */
static void
mptsas_hba_teardown(mptsas_t *mpt)
{
	(void) scsi_hba_detach(mpt->m_dip);
	if (mpt->m_tran != NULL) {
		scsi_hba_tran_free(mpt->m_tran);
		mpt->m_tran = NULL;
	}
}
2049 2048
2050 2049 static void
2051 2050 mptsas_iport_register(mptsas_t *mpt)
2052 2051 {
2053 2052 int i, j;
2054 2053 mptsas_phymask_t mask = 0x0;
2055 2054 /*
2056 2055 * initial value of mask is 0
2057 2056 */
2058 2057 mutex_enter(&mpt->m_mutex);
2059 2058 for (i = 0; i < mpt->m_num_phys; i++) {
2060 2059 mptsas_phymask_t phy_mask = 0x0;
2061 2060 char phy_mask_name[MPTSAS_MAX_PHYS];
2062 2061 uint8_t current_port;
2063 2062
2064 2063 if (mpt->m_phy_info[i].attached_devhdl == 0)
2065 2064 continue;
2066 2065
2067 2066 bzero(phy_mask_name, sizeof (phy_mask_name));
2068 2067
2069 2068 current_port = mpt->m_phy_info[i].port_num;
2070 2069
2071 2070 if ((mask & (1 << i)) != 0)
2072 2071 continue;
2073 2072
2074 2073 for (j = 0; j < mpt->m_num_phys; j++) {
2075 2074 if (mpt->m_phy_info[j].attached_devhdl &&
2076 2075 (mpt->m_phy_info[j].port_num == current_port)) {
2077 2076 phy_mask |= (1 << j);
2078 2077 }
2079 2078 }
2080 2079 mask = mask | phy_mask;
2081 2080
2082 2081 for (j = 0; j < mpt->m_num_phys; j++) {
2083 2082 if ((phy_mask >> j) & 0x01) {
2084 2083 mpt->m_phy_info[j].phy_mask = phy_mask;
2085 2084 }
2086 2085 }
2087 2086
2088 2087 (void) sprintf(phy_mask_name, "%x", phy_mask);
2089 2088
2090 2089 mutex_exit(&mpt->m_mutex);
2091 2090 /*
2092 2091 * register a iport
2093 2092 */
2094 2093 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
2095 2094 mutex_enter(&mpt->m_mutex);
2096 2095 }
2097 2096 mutex_exit(&mpt->m_mutex);
2098 2097 /*
2099 2098 * register a virtual port for RAID volume always
2100 2099 */
2101 2100 (void) scsi_hba_iport_register(mpt->m_dip, "v0");
2102 2101
2103 2102 }
2104 2103
/*
 * Allocate and attach the SMP (SAS expander) transport for this HBA
 * and initialize the SMP target hash table.  Returns TRUE on success;
 * on failure the smp tran is freed, m_smptran cleared, FALSE returned.
 */
static int
mptsas_smp_setup(mptsas_t *mpt)
{
	mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
	ASSERT(mpt->m_smptran != NULL);
	mpt->m_smptran->smp_tran_hba_private = mpt;
	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
	if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
		smp_hba_tran_free(mpt->m_smptran);
		mpt->m_smptran = NULL;
		return (FALSE);
	}
	/*
	 * Initialize smp hash table
	 */
	mptsas_hash_init(&mpt->m_active->m_smptbl);
	/*
	 * 0xFFFF appears to be the initial "no device" handle used to
	 * start expander enumeration -- NOTE(review): confirm against
	 * the SMP discovery code.
	 */
	mpt->m_smp_devhdl = 0xFFFF;

	return (TRUE);
}
2126 2125
/*
 * Undo mptsas_smp_setup(): detach the SMP framework node, free the
 * smp tran if allocated, and clear the SMP device handle.
 */
static void
mptsas_smp_teardown(mptsas_t *mpt)
{
	(void) smp_hba_detach(mpt->m_dip);
	if (mpt->m_smptran != NULL) {
		smp_hba_tran_free(mpt->m_smptran);
		mpt->m_smptran = NULL;
	}
	mpt->m_smp_devhdl = 0;
}
2137 2136
2138 2137 static int
2139 2138 mptsas_cache_create(mptsas_t *mpt)
2140 2139 {
2141 2140 int instance = mpt->m_instance;
2142 2141 char buf[64];
2143 2142
2144 2143 /*
2145 2144 * create kmem cache for packets
2146 2145 */
2147 2146 (void) sprintf(buf, "mptsas%d_cache", instance);
2148 2147 mpt->m_kmem_cache = kmem_cache_create(buf,
2149 2148 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2150 2149 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2151 2150 NULL, (void *)mpt, NULL, 0);
2152 2151
2153 2152 if (mpt->m_kmem_cache == NULL) {
2154 2153 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2155 2154 return (FALSE);
2156 2155 }
2157 2156
2158 2157 /*
2159 2158 * create kmem cache for extra SGL frames if SGL cannot
2160 2159 * be accomodated into main request frame.
2161 2160 */
2162 2161 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2163 2162 mpt->m_cache_frames = kmem_cache_create(buf,
2164 2163 sizeof (mptsas_cache_frames_t), 8,
2165 2164 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2166 2165 NULL, (void *)mpt, NULL, 0);
2167 2166
2168 2167 if (mpt->m_cache_frames == NULL) {
2169 2168 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2170 2169 return (FALSE);
2171 2170 }
2172 2171
2173 2172 return (TRUE);
2174 2173 }
2175 2174
/*
 * Destroy the kmem caches created by mptsas_cache_create().  Safe to
 * call with either or both caches unallocated (NULL).
 */
static void
mptsas_cache_destroy(mptsas_t *mpt)
{
	/* deallocate in reverse order */
	if (mpt->m_cache_frames) {
		kmem_cache_destroy(mpt->m_cache_frames);
		mpt->m_cache_frames = NULL;
	}
	if (mpt->m_kmem_cache) {
		kmem_cache_destroy(mpt->m_kmem_cache);
		mpt->m_kmem_cache = NULL;
	}
}
2189 2188
/*
 * power(9E) entry point: raise or lower the chip power level on behalf
 * of the PM framework.  Only the HBA node is PM-managed; iport nodes
 * succeed immediately.  For PM_LEVEL_D0 the IOC is polled out of reset
 * for up to 30 seconds and hard-reset if it still is not operational.
 * A busy device refuses to be lowered.  Returns DDI_SUCCESS/FAILURE.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef	__lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t	*mpt;
	int		rval = DDI_SUCCESS;
	int		polls = 0;
	uint32_t	ioc_status;

	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset.
		 * (3000 polls x 10ms delay = 30s.)
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		/* m_power_level is also read under m_intr_mutex. */
		mutex_enter(&mpt->m_intr_mutex);
		mpt->m_power_level = PM_LEVEL_D0;
		mutex_exit(&mpt->m_intr_mutex);
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2263 2262
/*
 * Initialize configuration space and figure out which
 * chip and revision of the chip the mpt driver is using.
 *
 * Maps PCI config space (idempotent: returns early if already
 * mapped), clears any latched parity error, programs the command
 * register, and caches device/revision/subsystem IDs in the soft
 * state.  Returns TRUE on success, FALSE if config space cannot be
 * mapped.
 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team.  This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}
2322 2321
/*
 * Tear down PCI config space access: disable bus mastering first so
 * no DMA can occur afterwards, then unmap config space.  No-op if
 * config space was never mapped.
 */
static void
mptsas_config_space_fini(mptsas_t *mpt)
{
	if (mpt->m_config_handle != NULL) {
		mptsas_disable_bus_master(mpt);
		pci_config_teardown(&mpt->m_config_handle);
		mpt->m_config_handle = NULL;
	}
}
2332 2331
/*
 * Program the PCI command register: enable bus mastering, SERR,
 * parity detection and memory space; disable I/O space access.
 */
static void
mptsas_setup_cmd_reg(mptsas_t *mpt)
{
	ushort_t	cmdreg;

	/*
	 * Set the command register to the needed values.
	 */
	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
	cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
	    PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
	cmdreg &= ~PCI_COMM_IO;
	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
}
2347 2346
/*
 * Clear PCI_COMM_ME in the command register so the chip can no longer
 * master the bus (i.e. no further DMA), used during teardown.
 */
static void
mptsas_disable_bus_master(mptsas_t *mpt)
{
	ushort_t	cmdreg;

	/*
	 * Clear the master enable bit in the PCI command register.
	 * This prevents any bus mastering activity like DMA.
	 */
	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
	cmdreg &= ~PCI_COMM_ME;
	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
}
2361 2360
/*
 * Allocate a single-cookie DMA buffer described by dma_statep (whose
 * size field must be set by the caller); handle, access handle,
 * kernel address and cookie are filled into dma_statep.  Returns
 * DDI_SUCCESS or DDI_FAILURE.  Pair with mptsas_dma_free().
 */
int
mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
{
	ddi_dma_attr_t	attrs;

	attrs = mpt->m_io_dma_attr;
	attrs.dma_attr_sgllen = 1;

	ASSERT(dma_statep != NULL);

	if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
	    &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
	    &dma_statep->cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
2380 2379
/*
 * Free a DMA buffer allocated by mptsas_dma_alloc() and zero its
 * recorded size.
 */
void
mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
{
	ASSERT(dma_statep != NULL);
	mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
	dma_statep->size = 0;
}
2388 2387
/*
 * Allocate a temporary single-cookie DMA buffer of the given size,
 * invoke callback(mpt, memp, var, accessp) with it, verify the DMA and
 * access handles with the FMA checks, and free the buffer.  Returns
 * the callback's result, or DDI_FAILURE if a handle check fails (in
 * which case a DDI_SERVICE_UNAFFECTED impact is reported).  Caller
 * must hold m_mutex.
 */
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t		attrs;
	ddi_dma_handle_t	dma_handle;
	caddr_t			memp;
	ddi_acc_handle_t	accessp;
	int			rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	attrs.dma_attr_granular = size;

	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	rval = (*callback) (mpt, memp, var, accessp);

	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}
2421 2420
/*
 * Allocate (or re-allocate, e.g. across an IOC reset) the System
 * Request Message Frame pool: m_max_requests frames of
 * m_req_frame_size bytes, 16-byte aligned, in one DMA cookie.  Both
 * the DMA address (for the chip) and kernel address (for the driver)
 * are stored in the soft state, and the pool is zeroed.  Returns
 * DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_request_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	/*
	 * The size of the request frame pool is:
	 *   Number of Request Frames * Request Frame Size
	 */
	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;

	/*
	 * set the DMA attributes.  System Request Message Frames must be
	 * aligned on a 16-byte boundry.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request frame pool.
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to fill in the frame.
	 */
	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_req_frame = memp;

	/*
	 * Clear the request frame pool.
	 */
	bzero(mpt->m_req_frame, mem_size);

	return (DDI_SUCCESS);
}
2474 2473
/*
 * Allocate (or re-allocate) the System Reply Message Frame pool:
 * m_max_replies frames of m_reply_frame_size bytes in one DMA cookie
 * (default 4-byte alignment is sufficient).  DMA and kernel addresses
 * are stored in the soft state and the pool is zeroed.  Returns
 * DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_reply_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	/*
	 * The size of the reply frame pool is:
	 *   Number of Reply Frames * Reply Frame Size
	 */
	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;

	/*
	 * set the DMA attributes.   System Reply Message Frames must be
	 * aligned on a 4-byte boundry.  This is the default.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply frame pool
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to process the frame.
	 */
	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_reply_frame = memp;

	/*
	 * Clear the reply frame pool.
	 */
	bzero(mpt->m_reply_frame, mem_size);

	return (DDI_SUCCESS);
}
2526 2525
/*
 * Allocate (or re-allocate) the Reply Free Queue: m_free_queue_depth
 * 32-bit entries (low half of each reply frame address), 16-byte
 * aligned, in one DMA cookie.  DMA and kernel addresses are stored in
 * the soft state and the queue is zeroed.  Returns DDI_SUCCESS or
 * DDI_FAILURE.
 */
static int
mptsas_alloc_free_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	/*
	 * The reply free queue size is:
	 *   Reply Free Queue Depth * 4
	 * The "4" is the size of one 32 bit address (low part of 64-bit
	 *   address)
	 */
	mem_size = mpt->m_free_queue_depth * 4;

	/*
	 * set the DMA attributes  The Reply Free Queue must be aligned on a
	 * 16-byte boundry.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply free queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply free queue memory address.  This chip uses this
	 * address to read from the reply free queue.  The second address
	 * is the address mpt uses to manage the queue.
	 */
	mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_free_queue = memp;

	/*
	 * Clear the reply free queue memory.
	 */
	bzero(mpt->m_free_queue, mem_size);

	return (DDI_SUCCESS);
}
2581 2580
/*
 * Allocate (or re-allocate) the Reply Descriptor Post Queue:
 * m_post_queue_depth 8-byte descriptors, 16-byte aligned, in one DMA
 * cookie.  DMA and kernel addresses are stored in the soft state and
 * the queue is zeroed.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_post_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	/*
	 * The reply descriptor post queue size is:
	 *   Reply Descriptor Post Queue Depth * 8
	 * The "8" is the size of each descriptor (8 bytes or 64 bits).
	 */
	mem_size = mpt->m_post_queue_depth * 8;

	/*
	 * set the DMA attributes.  The Reply Descriptor Post Queue must be
	 * aligned on a 16-byte boundry.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply post queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply descriptor post queue memory address.  This chip
	 * uses this address to write to the reply descriptor post queue.  The
	 * second address is the address mpt uses to manage the queue.
	 */
	mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_post_queue = memp;

	/*
	 * Clear the reply post queue memory.
	 */
	bzero(mpt->m_post_queue, mem_size);

	return (DDI_SUCCESS);
}
2635 2634
/*
 * (Re)allocate the zero-filled array of reply handler argument
 * structures, one per possible reply (m_max_replies).  Any previous
 * array is freed first, so this is safe across IOC resets.
 */
static void
mptsas_alloc_reply_args(mptsas_t *mpt)
{
	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
		mpt->m_replyh_args = NULL;
	}
	mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
	    mpt->m_max_replies, KM_SLEEP);
}
2647 2646
2648 2647 static int
2649 2648 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2650 2649 {
2651 2650 mptsas_cache_frames_t *frames = NULL;
2652 2651 if (cmd->cmd_extra_frames == NULL) {
2653 2652 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2654 2653 if (frames == NULL) {
2655 2654 return (DDI_FAILURE);
2656 2655 }
2657 2656 cmd->cmd_extra_frames = frames;
2658 2657 }
2659 2658 return (DDI_SUCCESS);
2660 2659 }
2661 2660
/*
 * Return a command's extra SGL frame, if any, to the m_cache_frames
 * kmem cache and clear the pointer.
 */
static void
mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	if (cmd->cmd_extra_frames) {
		kmem_cache_free(mpt->m_cache_frames,
		    (void *)cmd->cmd_extra_frames);
		cmd->cmd_extra_frames = NULL;
	}
}
2671 2670
/*
 * Unmap the chip's register space (m_datap) mapped during attach.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2678 2677
/*
 * Free all HBA message-passing memory: the request and reply frame
 * pools, the reply free and post queues, and the reply handler
 * argument array.
 */
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Free up any allocated memory
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}
2704 2703
2705 2704 static int
2706 2705 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
2707 2706 {
2708 2707 int lun = 0;
2709 2708 char *sas_wwn = NULL;
2710 2709 int phynum = -1;
2711 2710 int reallen = 0;
2712 2711
2713 2712 /* Get the target num */
2714 2713 lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2715 2714 LUN_PROP, 0);
2716 2715
2717 2716 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
2718 2717 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
2719 2718 /*
2720 2719 * Stick in the address of form "pPHY,LUN"
2721 2720 */
2722 2721 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
2723 2722 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
2724 2723 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
2725 2724 == DDI_PROP_SUCCESS) {
2726 2725 /*
2727 2726 * Stick in the address of the form "wWWN,LUN"
2728 2727 */
2729 2728 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
2730 2729 ddi_prop_free(sas_wwn);
2731 2730 } else {
2732 2731 return (DDI_FAILURE);
2733 2732 }
2734 2733
2735 2734 ASSERT(reallen < len);
2736 2735 if (reallen >= len) {
2737 2736 mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
2738 2737 "length too small, it needs to be %d bytes", reallen + 1);
2739 2738 }
2740 2739 return (DDI_SUCCESS);
2741 2740 }
2742 2741
/*
 * tran_tgt_init(9E) - target device instance initialization
 *
 * Resolve the (sas_wwn, phymask) address to an existing mptsas_target_t,
 * hang a freshly allocated mptsas_tgt_private_t off the transport handle,
 * and — for direct-attached SATA/ATAPI devices — override the SCSA
 * "inquiry-*" properties with vid/pid/rev parsed from VPD page 0x89.
 * Returns DDI_FAILURE / DDI_NOT_WELL_FORMED on the various lookup
 * failures, DDI_SUCCESS otherwise.
 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * Use this function to allocate target-private data structures,
	 * if needed by this HBA.  Add revised flow-control and queue
	 * properties for child here, if desired and if you can tell they
	 * support tagged queueing by now.
	 */
	mptsas_t *mpt;
	int lun = sd->sd_address.a_lun;
	mdi_pathinfo_t *pip = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	mptsas_target_t *ptgt = NULL;
	char *psas_wwn = NULL;
	int phymask = 0;
	uint64_t sas_wwn = 0;
	mpt = SDEV2MPT(sd);

	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	/*
	 * Non-persistent nodes are merged into their persistent
	 * counterpart and rejected here.
	 */
	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		/*
		 * scsi_vhci client path: pull LUN and target-port WWN
		 * from the pathinfo node instead of devinfo properties.
		 */
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs.  Somehow scsi_vhci
			 * has lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				/* unparsable WWN string: fall back to 0 */
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/* Plain devinfo node: read the same properties via DDI. */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}
	/* A target must be addressable by WWN or by (virtual) phymask. */
	ASSERT((sas_wwn != 0) || (phymask != 0));
	mutex_enter(&mpt->m_mutex);
	ptgt = mptsas_hash_search(&mpt->m_active->m_tgttbl, sas_wwn, phymask);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	if (hba_tran->tran_tgt_private == NULL) {
		/* Freed again in mptsas_scsi_tgt_free(). */
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	/* vhci clients are done; inquiry overrides apply to phys paths. */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;
		int i;

		/* Drop m_mutex: the inquiry below can block. */
		mutex_exit(&mpt->m_mutex);
		/*
		 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			/*
			 * Inquiry failure is non-fatal: the node still
			 * attaches, just without the vid/pid overrides.
			 */
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			return (DDI_SUCCESS);
		}
		/* IDENTIFY data starts at byte 60 of the VPD 89h page. */
		sid = (void *)(&inq89[60]);

		/* ATA strings are byte-swapped on the wire. */
		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		/*
		 * split model into vid/pid at the first whitespace
		 */
		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
			if ((*pid == ' ') || (*pid == '\t'))
				break;
		if (i < SATA_ID_MODEL_LEN) {
			vid = model;
			/*
			 * terminate vid, establish pid
			 */
			*pid++ = 0;
		} else {
			/*
			 * vid will stay "ATA     ", the rule is same
			 * as sata framework implementation.
			 */
			vid = NULL;
			/*
			 * model is all pid
			 */
			pid = model;
		}

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
2932 2931 /*
2933 2932 * tran_tgt_free(9E) - target device instance deallocation
2934 2933 */
2935 2934 static void
2936 2935 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2937 2936 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2938 2937 {
2939 2938 #ifndef __lock_lint
2940 2939 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
2941 2940 #endif
2942 2941
2943 2942 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
2944 2943
2945 2944 if (tgt_private != NULL) {
2946 2945 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
2947 2946 hba_tran->tran_tgt_private = NULL;
2948 2947 }
2949 2948 }
2950 2949
2951 2950 /*
2952 2951 * scsi_pkt handling
2953 2952 *
2954 2953 * Visible to the external world via the transport structure.
2955 2954 */
2956 2955
2957 2956 /*
2958 2957 * Notes:
2959 2958 * - transport the command to the addressed SCSI target/lun device
2960 2959 * - normal operation is to schedule the command to be transported,
2961 2960 * and return TRAN_ACCEPT if this is successful.
2962 2961 * - if NO_INTR, tran_start must poll device for command completion
2963 2962 */
/*
 * tran_start(9E) entry point: validate and prepare the packet, reject
 * commands aimed at a target in dynamic-reconfiguration transition, and
 * otherwise hand off to mptsas_accept_pkt() for queuing/starting.
 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 */

	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
			/*
			 * commands should be allowed to retry by
			 * returning TRAN_BUSY to stall the I/O's
			 * which come from scsi_vhci since the device/
			 * path is in unstable state now.
			 */
			mutex_exit(&ptgt->m_tgt_intr_mutex);
			return (TRAN_BUSY);
		} else {
			/*
			 * The device is offline, just fail the
			 * command by returning TRAN_FATAL_ERROR.
			 */
			mutex_exit(&ptgt->m_tgt_intr_mutex);
			return (TRAN_FATAL_ERROR);
		}
	}
	mutex_exit(&ptgt->m_tgt_intr_mutex);
	rval = mptsas_accept_pkt(mpt, cmd);

	return (rval);
}
3031 3030
/*
 * Queue or start an accepted command.  Handles throttle reset after a
 * drain, rejection of commands whose device handle has been invalidated
 * (distinguishing HBA-reset from device-gone), and the fast path that
 * grabs a slot and starts the command immediately versus the slow path
 * that appends it to the wait queue (with polled completion for
 * FLAG_NOINTR packets).  Returns a TRAN_* code.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	/* Packets resubmitted internally may not yet be prepared. */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If the device handle has already been invalidated, just
	 * fail the command.  In theory, a command from a scsi_vhci
	 * client cannot be sent down with an invalid devhdl, since
	 * devhdl is set after path offline and the target driver is
	 * not supposed to select an offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG20(("rejecting command, it might because invalid devhdl "
		    "request."));
		/* Lock order: drop target mutex before taking m_mutex. */
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		mutex_enter(&mpt->m_mutex);
		/*
		 * If HBA is being reset, the DevHandles are being
		 * re-initialized, which means that they could be invalid
		 * even if the target is still attached.  Check if being reset
		 * and if DevHandle is being re-initialized.  If this is the
		 * case, return BUSY so the I/O can be retried later.
		 */
		if (mpt->m_in_reset) {
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if (cmd->cmd_flags & CFLAG_TXQ) {
				/* TXQ commands complete through the doneq. */
				mptsas_doneq_add(mpt, cmd);
				mptsas_doneq_empty(mpt);
				mutex_exit(&mpt->m_mutex);
				return (rval);
			} else {
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			}
		}
		/* Not resetting: the device really is gone. */
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			mutex_exit(&mpt->m_mutex);
			return (rval);
		} else {
			mutex_exit(&mpt->m_mutex);
			return (TRAN_FATAL_ERROR);
		}
	}
	mutex_exit(&ptgt->m_tgt_intr_mutex);
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if ((ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd0(mpt, cmd);
		} else {
			/* No free slot: fall back to the wait queue. */
			mutex_enter(&mpt->m_mutex);
			mptsas_waitq_add(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		mutex_enter(&mpt->m_mutex);
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
		mutex_exit(&mpt->m_mutex);
	}
	return (rval);
}
3147 3146
/*
 * Allocate a request slot (SMID) for the command from the per-CPU pair
 * of free-slot queues.  Each queue pair has an alloc queue and a release
 * queue; when the alloc queue is empty, the release queue's entire list
 * is spliced over to it under both queue mutexes.  If every pair is
 * empty, return FALSE so the caller queues the command instead.  On
 * success the slot is stored in cmd_slot, the per-target command count
 * is bumped (unless the command is target-less, e.g. an IOC command),
 * the active timeout is primed, and TRUE is returned.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t *slots;
	int slot;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
	mptsas_slot_free_e_t *pe;
	int qn, qn_first;

	slots = mpt->m_active;

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));

	/*
	 * Start with the queue pair hashed from the current CPU;
	 * m_slot_freeq_pair_n is a power of two so the mask works.
	 */
	qn = qn_first = CPU->cpu_seqid & (mpt->m_slot_freeq_pair_n - 1);

qpair_retry:
	ASSERT(qn < mpt->m_slot_freeq_pair_n);
	mutex_enter(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_mutex);
	pe = list_head(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.
	    s.m_fq_list);
	if (!pe) { /* switch the allocq and releq */
		mutex_enter(&mpt->m_slot_freeq_pairp[qn].m_slot_releq.
		    s.m_fq_mutex);
		if (mpt->m_slot_freeq_pairp[qn].m_slot_releq.s.m_fq_n) {
			/*
			 * Splice the entire release list onto the (empty)
			 * alloc list by rewiring the list heads directly,
			 * then re-point the boundary elements' links at
			 * the alloc list head and reset the release list
			 * to empty.  Both queue mutexes are held.
			 */
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_n =
			    mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_n;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_list.list_head.list_next =
			    mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_next;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_list.list_head.list_prev =
			    mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_prev;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_prev->
			    list_next =
			    &mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_list.list_head;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_next->
			    list_prev =
			    &mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_list.list_head;

			mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_next =
			    mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head.list_prev =
			    &mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_list.list_head;
			mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_n = 0;
		} else {
			/*
			 * This pair is completely empty; drop both locks
			 * and try the next pair, giving up only after
			 * wrapping back to where we started.
			 */
			mutex_exit(&mpt->m_slot_freeq_pairp[qn].
			    m_slot_releq.s.m_fq_mutex);
			mutex_exit(&mpt->m_slot_freeq_pairp[qn].
			    m_slot_allocq.s.m_fq_mutex);
			qn = (qn + 1) & (mpt->m_slot_freeq_pair_n - 1);
			if (qn == qn_first)
				return (FALSE);
			else
				goto qpair_retry;
		}
		mutex_exit(&mpt->m_slot_freeq_pairp[qn].
		    m_slot_releq.s.m_fq_mutex);
		pe = list_head(&mpt->m_slot_freeq_pairp[qn].
		    m_slot_allocq.s.m_fq_list);
		ASSERT(pe);
	}
	list_remove(&mpt->m_slot_freeq_pairp[qn].
	    m_slot_allocq.s.m_fq_list, pe);
	slot = pe->slot;
	/*
	 * Make sure SMID is not using reserved value of 0
	 * and the TM request slot.
	 */
	ASSERT((slot > 0) && (slot <= slots->m_n_slots) &&
	    mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n > 0);
	cmd->cmd_slot = slot;
	mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n--;
	ASSERT(mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n >= 0);

	mutex_exit(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_mutex);
	/*
	 * only increment per target ncmds if this is not a
	 * command that has no target associated with it (i.e. an
	 * event acknowledgment)
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		ptgt->m_t_ncmds++;
		mutex_exit(&ptgt->m_tgt_intr_mutex);
	}
	cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;

	/*
	 * If initial timeout is less than or equal to one tick, bump
	 * the timeout by a tick so that command doesn't timeout before
	 * its allotted time.
	 */
	if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
		cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
	}
	return (TRUE);
}
3259 3258
3260 3259 /*
3261 3260 * prepare the pkt:
3262 3261 * the pkt may have been resubmitted or just reused so
3263 3262 * initialize some fields and do some checks.
3264 3263 */
3265 3264 static int
3266 3265 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3267 3266 {
3268 3267 struct scsi_pkt *pkt = CMD2PKT(cmd);
3269 3268
3270 3269 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3271 3270
3272 3271 /*
3273 3272 * Reinitialize some fields that need it; the packet may
3274 3273 * have been resubmitted
3275 3274 */
3276 3275 pkt->pkt_reason = CMD_CMPLT;
3277 3276 pkt->pkt_state = 0;
3278 3277 pkt->pkt_statistics = 0;
3279 3278 pkt->pkt_resid = 0;
3280 3279 cmd->cmd_age = 0;
3281 3280 cmd->cmd_pkt_flags = pkt->pkt_flags;
3282 3281
3283 3282 /*
3284 3283 * zero status byte.
3285 3284 */
3286 3285 *(pkt->pkt_scbp) = 0;
3287 3286
3288 3287 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3289 3288 pkt->pkt_resid = cmd->cmd_dmacount;
3290 3289
3291 3290 /*
3292 3291 * consistent packets need to be sync'ed first
3293 3292 * (only for data going out)
3294 3293 */
3295 3294 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3296 3295 (cmd->cmd_flags & CFLAG_DMASEND)) {
3297 3296 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3298 3297 DDI_DMA_SYNC_FORDEV);
3299 3298 }
3300 3299 }
3301 3300
3302 3301 cmd->cmd_flags =
3303 3302 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3304 3303 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3305 3304
3306 3305 return (TRAN_ACCEPT);
3307 3306 }
3308 3307
/*
 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
 *
 * One of three possibilities:
 *	- allocate scsi_pkt
 *	- allocate scsi_pkt and DMA resources
 *	- allocate DMA resources to an already-allocated pkt
 */
static struct scsi_pkt *
mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	mptsas_cmd_t		*cmd, *new_cmd;
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			failure = 1;
#ifndef	__sparc
	uint_t			oldcookiec;
#endif	/* __sparc */
	mptsas_target_t		*ptgt = NULL;
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			kf;

	/* Sleep for allocations only if the caller said it may block. */
	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	if (tgt_private == NULL) {
		return (NULL);
	}
	ptgt = tgt_private->t_private;
	ASSERT(ptgt != NULL);
	if (ptgt == NULL)
		return (NULL);
	ap->a_target = ptgt->m_devhdl;
	ap->a_lun = tgt_private->t_lun;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
#ifdef MPTSAS_TEST_EXTRN_ALLOC
	statuslen *= 100; tgtlen *= 4;
#endif
	NDBG3(("mptsas_scsi_init_pkt:\n"
	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
	    ap->a_target, (void *)pkt, (void *)bp,
	    cmdlen, statuslen, tgtlen, flags));

	/*
	 * Allocate the new packet.
	 */
	if (pkt == NULL) {
		ddi_dma_handle_t	save_dma_handle;
		ddi_dma_handle_t	save_arq_dma_handle;
		struct buf		*save_arq_bp;
		ddi_dma_cookie_t	save_arqcookie;
#ifdef	__sparc
		mptti_t			*save_sg;
#endif	/* __sparc */

		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);

		if (cmd) {
			/*
			 * The cache constructor set up these handles once;
			 * save them across the bzero of the whole object
			 * and restore them afterwards.
			 */
			save_dma_handle = cmd->cmd_dmahandle;
			save_arq_dma_handle = cmd->cmd_arqhandle;
			save_arq_bp = cmd->cmd_arq_buf;
			save_arqcookie = cmd->cmd_arqcookie;
#ifdef	__sparc
			save_sg = cmd->cmd_sg;
#endif	/* __sparc */
			bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
			cmd->cmd_dmahandle = save_dma_handle;
			cmd->cmd_arqhandle = save_arq_dma_handle;
			cmd->cmd_arq_buf = save_arq_bp;
			cmd->cmd_arqcookie = save_arqcookie;
#ifdef	__sparc
			cmd->cmd_sg = save_sg;
#endif	/* __sparc */
			/* The scsi_pkt lives immediately after the cmd. */
			pkt = (void *)((uchar_t *)cmd +
			    sizeof (struct mptsas_cmd));
			pkt->pkt_ha_private = (opaque_t)cmd;
			pkt->pkt_address = *ap;
			pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
			pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
			pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
			cmd->cmd_pkt = (struct scsi_pkt *)pkt;
			cmd->cmd_cdblen = (uchar_t)cmdlen;
			cmd->cmd_scblen = statuslen;
			cmd->cmd_rqslen = SENSE_LENGTH;
			cmd->cmd_tgt_addr = ptgt;
			failure = 0;
		}

		/*
		 * Caller asked for CDB/status/private areas larger than
		 * the embedded ones; fall back to external allocation.
		 */
		if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			if (failure == 0) {
				/*
				 * if extern alloc fails, all will be
				 * deallocated, including cmd
				 */
				failure = mptsas_pkt_alloc_extern(mpt, cmd,
				    cmdlen, tgtlen, statuslen, kf);
			}
			if (failure) {
				/*
				 * if extern allocation fails, it will
				 * deallocate the new pkt as well
				 */
				return (NULL);
			}
		}
		new_cmd = cmd;

	} else {
		cmd = PKT2CMD(pkt);
		new_cmd = NULL;
	}


#ifndef	__sparc
	/* grab cmd->cmd_cookiec here as oldcookiec */

	oldcookiec = cmd->cmd_cookiec;
#endif	/* __sparc */

	/*
	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
	 * greater than 0 and we'll need to grab the next dma window
	 */
	/*
	 * SLM-not doing extra command frame right now; may add later
	 */

	if (cmd->cmd_nwin > 0) {

		/*
		 * Make sure we haven't gone past the total number
		 * of windows
		 */
		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
			return (NULL);
		}
		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
			return (NULL);
		}
		goto get_dma_cookies;
	}


	if (flags & PKT_XARQ) {
		cmd->cmd_flags |= CFLAG_XARQ;
	}

	/*
	 * DMA resource allocation.  This version assumes your
	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
	 */
	if (bp && (bp->b_bcount != 0) &&
	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {

		int	cnt, dma_flags;
		mptti_t	*dmap;		/* ptr to the S/G list */

		/*
		 * Set up DMA memory and position to the next DMA segment.
		 */
		ASSERT(cmd->cmd_dmahandle != NULL);

		if (bp->b_flags & B_READ) {
			dma_flags = DDI_DMA_READ;
			cmd->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			dma_flags = DDI_DMA_WRITE;
			cmd->cmd_flags |= CFLAG_DMASEND;
		}
		if (flags & PKT_CONSISTENT) {
			cmd->cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		if (flags & PKT_DMA_PARTIAL) {
			dma_flags |= DDI_DMA_PARTIAL;
		}

		/*
		 * workaround for byte hole issue on psycho and
		 * schizo pre 2.1
		 */
		if ((bp->b_flags & B_READ) && ((bp->b_flags &
		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
		    dma_flags, callback, arg,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
		if (rval == DDI_DMA_PARTIAL_MAP) {
			/* Partial mapping: position at the first window. */
			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
			    &cmd->cmd_cookiec);
		} else if (rval && (rval != DDI_DMA_MAPPED)) {
			/* Map the bind failure to an errno on the buf. */
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->cmd_flags &= ~CFLAG_DMAVALID;
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

get_dma_cookies:
		cmd->cmd_flags |= CFLAG_DMAVALID;
		ASSERT(cmd->cmd_cookiec > 0);

		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
			    cmd->cmd_cookiec);
			bioerror(bp, EINVAL);
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

		/*
		 * Allocate extra SGL buffer if needed.
		 */
		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
		    (cmd->cmd_extra_frames == NULL)) {
			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
			    DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
				    "failed");
				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		/*
		 * Always use scatter-gather transfer
		 * Use the loop below to store physical addresses of
		 * DMA segments, from the DMA cookies, into your HBA's
		 * scatter-gather list.
		 * We need to ensure we have enough kmem alloc'd
		 * for the sg entries since we are no longer using an
		 * array inside mptsas_cmd_t.
		 *
		 * We check cmd->cmd_cookiec against oldcookiec so
		 * the scatter-gather list is correctly allocated
		 */
#ifndef	__sparc
		if (oldcookiec != cmd->cmd_cookiec) {
			if (cmd->cmd_sg != (mptti_t *)NULL) {
				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
				    oldcookiec);
				cmd->cmd_sg = NULL;
			}
		}

		if (cmd->cmd_sg == (mptti_t *)NULL) {
			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
			    cmd->cmd_cookiec), kf);

			if (cmd->cmd_sg == (mptti_t *)NULL) {
				mptsas_log(mpt, CE_WARN,
				    "unable to kmem_alloc enough memory "
				    "for scatter/gather list");
				/*
				 * if we have an ENOMEM condition we need to
				 * behave the same way as the rest of this
				 * routine
				 */

				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}
#endif	/* __sparc */
		dmap = cmd->cmd_sg;

		ASSERT(cmd->cmd_cookie.dmac_size != 0);

		/*
		 * store the first segment into the S/G list
		 */
		dmap->count = cmd->cmd_cookie.dmac_size;
		dmap->addr.address64.Low = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
		dmap->addr.address64.High = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress >> 32);

		/*
		 * dmacount counts the size of the dma for this window
		 * (if partial dma is being used).  totaldmacount
		 * keeps track of the total amount of dma we have
		 * transferred for all the windows (needed to calculate
		 * the resid value below).
		 */
		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

		/*
		 * We already stored the first DMA scatter gather segment,
		 * start at 1 if we need to store more.
		 */
		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
			/*
			 * Get next DMA cookie
			 */
			ddi_dma_nextcookie(cmd->cmd_dmahandle,
			    &cmd->cmd_cookie);
			dmap++;

			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

			/*
			 * store the segment parms into the S/G list
			 */
			dmap->count = cmd->cmd_cookie.dmac_size;
			dmap->addr.address64.Low = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
			dmap->addr.address64.High = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress >> 32);
		}

		/*
		 * If this was partially allocated we set the resid
		 * the amount of data NOT transferred in this window
		 * If there is only one window, the resid will be 0
		 */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
		NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
	}
	return (pkt);
}
3671 3670
/*
 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
 *
 * Notes:
 *	- also frees DMA resources if allocated
 *	- implicit DMA synchronization
 */
static void
mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
	    ap->a_target, (void *)pkt));

	/* Release the data DMA binding if one is still active. */
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
		cmd->cmd_flags &= ~CFLAG_DMAVALID;
	}
#ifndef __sparc
	/*
	 * On non-sparc the S/G list is allocated per-command (sized by
	 * cmd_cookiec) rather than pre-allocated in the kmem cache
	 * constructor, so it is freed here.
	 */
	if (cmd->cmd_sg) {
		kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
		cmd->cmd_sg = NULL;
	}
#endif	/* __sparc */
	/* Return any extra SGL chain frames to their cache. */
	mptsas_free_extra_sgl_frame(mpt, cmd);

	/*
	 * If no external cdb/private/status areas were allocated for this
	 * command, it can go straight back to the kmem cache; otherwise
	 * the external areas must be torn down first.
	 */
	if ((cmd->cmd_flags &
	    (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
	    CFLAG_SCBEXTERN)) == 0) {
		cmd->cmd_flags = CFLAG_FREE;
		kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
	} else {
		mptsas_pkt_destroy_extern(mpt, cmd);
	}
}
3709 3708
/*
 * kmem cache constructor and destructor:
 * When constructing, we allocate the command's DMA handle, ARQ (sense)
 * buffer, and ARQ DMA binding.  When destructing, these are released.
 * NOTE(review): the comment previously claimed the cmd is bzero'd here,
 * but no bzero is visible in this constructor — presumably callers rely
 * on explicit field initialization; confirm before depending on it.
 */
static int
mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cmd_t		*cmd = buf;
	mptsas_t		*mpt = cdrarg;
	struct scsi_address	ap;
	uint_t			cookiec;
	ddi_dma_attr_t		arq_dma_attr;
	int			(*callback)(caddr_t);

	/* Map kmem sleep semantics onto the DDI DMA wait semantics. */
	callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	NDBG4(("mptsas_kmem_cache_constructor"));

	/*
	 * Fabricate a scsi_address solely so scsi_alloc_consistent_buf()
	 * can find the HBA tran; target/lun values are irrelevant here.
	 */
	ap.a_hba_tran = mpt->m_tran;
	ap.a_target = 0;
	ap.a_lun = 0;

	/*
	 * allocate a dma handle
	 */
	if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
	    NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
		cmd->cmd_dmahandle = NULL;
		return (-1);
	}

	/* Consistent buffer to receive auto request sense data. */
	cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
	    SENSE_LENGTH, B_READ, callback, NULL);
	if (cmd->cmd_arq_buf == NULL) {
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		cmd->cmd_dmahandle = NULL;
		return (-1);
	}

	/*
	 * allocate a arq handle; a single cookie suffices since the sense
	 * buffer is small and contiguous.
	 */
	arq_dma_attr = mpt->m_msg_dma_attr;
	arq_dma_attr.dma_attr_sgllen = 1;
	if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
	    NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
		/* Unwind: free the handle and buffer allocated above. */
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_dmahandle = NULL;
		cmd->cmd_arqhandle = NULL;
		return (-1);
	}

	/* Bind the sense buffer so its cookie is ready at I/O time. */
	if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
	    cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
	    callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
		/* Unwind everything allocated so far. */
		ddi_dma_free_handle(&cmd->cmd_dmahandle);
		ddi_dma_free_handle(&cmd->cmd_arqhandle);
		scsi_free_consistent_buf(cmd->cmd_arq_buf);
		cmd->cmd_dmahandle = NULL;
		cmd->cmd_arqhandle = NULL;
		cmd->cmd_arq_buf = NULL;
		return (-1);
	}
	/*
	 * In sparc, the sgl length in most of the cases would be 1, so we
	 * pre-allocate it in cache. On x86, the max number would be 256,
	 * pre-allocate a maximum would waste a lot of memory especially
	 * when many cmds are put onto waitq.
	 */
#ifdef __sparc
	cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
	    MPTSAS_MAX_CMD_SEGS), KM_SLEEP);
#endif	/* __sparc */

	return (0);
}
3788 3787
3789 3788 static void
3790 3789 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3791 3790 {
3792 3791 #ifndef __lock_lint
3793 3792 _NOTE(ARGUNUSED(cdrarg))
3794 3793 #endif
3795 3794 mptsas_cmd_t *cmd = buf;
3796 3795
3797 3796 NDBG4(("mptsas_kmem_cache_destructor"));
3798 3797
3799 3798 if (cmd->cmd_arqhandle) {
3800 3799 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3801 3800 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3802 3801 cmd->cmd_arqhandle = NULL;
3803 3802 }
3804 3803 if (cmd->cmd_arq_buf) {
3805 3804 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3806 3805 cmd->cmd_arq_buf = NULL;
3807 3806 }
3808 3807 if (cmd->cmd_dmahandle) {
3809 3808 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3810 3809 cmd->cmd_dmahandle = NULL;
3811 3810 }
3812 3811 #ifdef __sparc
3813 3812 if (cmd->cmd_sg) {
3814 3813 kmem_free(cmd->cmd_sg, sizeof (mptti_t)* MPTSAS_MAX_CMD_SEGS);
3815 3814 cmd->cmd_sg = NULL;
3816 3815 }
3817 3816 #endif /* __sparc */
3818 3817 }
3819 3818
/*
 * kmem cache constructor for extra SGL chain-frame buffers: allocates
 * and binds a single contiguous DMA buffer large enough to hold
 * (m_max_request_frames - 1) request frames' worth of chain elements.
 * Returns DDI_SUCCESS or DDI_FAILURE (non-zero, as the kmem cache
 * constructor contract requires on failure).
 */
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t	*p = buf;
	mptsas_t		*mpt = cdrarg;
	ddi_dma_attr_t		frame_dma_attr;
	size_t			mem_size, alloc_len;
	ddi_dma_cookie_t	cookie;
	uint_t			ncookie;
	int			(*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/*
	 * 16-byte alignment and a single cookie: the chip requires the
	 * chain frames to be one contiguous, aligned region.
	 */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	/* One frame's worth of SGEs lives in the main request frame. */
	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		/* Unwind the memory and handle allocated above. */
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address.  This chip uses this
	 * address to dma to and from the driver.  The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_address;

	return (DDI_SUCCESS);
}
3875 3874
3876 3875 static void
3877 3876 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
3878 3877 {
3879 3878 #ifndef __lock_lint
3880 3879 _NOTE(ARGUNUSED(cdrarg))
3881 3880 #endif
3882 3881 mptsas_cache_frames_t *p = buf;
3883 3882 if (p->m_dma_hdl != NULL) {
3884 3883 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
3885 3884 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3886 3885 ddi_dma_free_handle(&p->m_dma_hdl);
3887 3886 p->m_phys_addr = NULL;
3888 3887 p->m_frames_addr = NULL;
3889 3888 p->m_dma_hdl = NULL;
3890 3889 p->m_acc_hdl = NULL;
3891 3890 }
3892 3891
3893 3892 }
3894 3893
/*
 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
 * for non-standard length cdb, pkt_private, status areas
 * if allocation fails, then deallocate all external space and the pkt
 *
 * Returns 0 on success, 1 on failure (after tearing everything down via
 * mptsas_pkt_destroy_extern, which also frees the cmd itself).
 */
/* ARGSUSED */
static int
mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
    int cmdlen, int tgtlen, int statuslen, int kf)
{
	caddr_t			cdbp, scbp, tgt;
	int			(*callback)(caddr_t) = (kf == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
	struct scsi_address	ap;
	size_t			senselength;
	ddi_dma_attr_t		ext_arq_dma_attr;
	uint_t			cookiec;

	NDBG3(("mptsas_pkt_alloc_extern: "
	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));

	tgt = cdbp = scbp = NULL;
	cmd->cmd_scblen		= statuslen;
	cmd->cmd_privlen	= (uchar_t)tgtlen;

	/* External CDB: requested cdb is larger than the in-cmd array. */
	if (cmdlen > sizeof (cmd->cmd_cdb)) {
		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
		cmd->cmd_flags |= CFLAG_CDBEXTERN;
	}
	/* External target-private area. */
	if (tgtlen > PKT_PRIV_LEN) {
		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
		cmd->cmd_pkt->pkt_private = tgt;
	}
	/* External status area plus a DMA-able sense buffer. */
	if (statuslen > EXTCMDS_STATUS_SIZE) {
		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_SCBEXTERN;
		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;

		/* allocate sense data buf for DMA */

		/* Sense bytes are what remains after the arq status header. */
		senselength = statuslen - MPTSAS_GET_ITEM_OFF(
		    struct scsi_arq_status, sts_sensedata);
		cmd->cmd_rqslen = (uchar_t)senselength;

		/*
		 * Fabricated address; only a_hba_tran is consumed by
		 * scsi_alloc_consistent_buf().
		 */
		ap.a_hba_tran = mpt->m_tran;
		ap.a_target = 0;
		ap.a_lun = 0;

		cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, senselength, B_READ,
		    callback, NULL);

		if (cmd->cmd_ext_arq_buf == NULL) {
			goto fail;
		}
		/*
		 * allocate a extern arq handle and bind the buf
		 */
		ext_arq_dma_attr = mpt->m_msg_dma_attr;
		ext_arq_dma_attr.dma_attr_sgllen = 1;
		if ((ddi_dma_alloc_handle(mpt->m_dip,
		    &ext_arq_dma_attr, callback,
		    NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
		    cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
		    callback, NULL, &cmd->cmd_ext_arqcookie,
		    &cookiec)
		    != DDI_SUCCESS) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
	}
	return (0);
fail:
	/*
	 * Centralized unwind: destroy_extern frees whatever the flags set
	 * above say was allocated, then frees the cmd itself.
	 */
	mptsas_pkt_destroy_extern(mpt, cmd);
	return (1);
}
3984 3983
/*
 * deallocate external pkt space and deallocate the pkt
 * Driven by the CFLAG_*EXTERN flags so it can also serve as the unwind
 * path for a partially-completed mptsas_pkt_alloc_extern().
 */
static void
mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));

	/* Double-free guard: a freed cmd must never come back here. */
	if (cmd->cmd_flags & CFLAG_FREE) {
		mptsas_log(mpt, CE_PANIC,
		    "mptsas_pkt_destroy_extern: freeing free packet");
		_NOTE(NOT_REACHED)
		/* NOTREACHED */
	}
	if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
	}
	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
		/* Unbind before freeing the handle and buffer beneath it. */
		if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
			(void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
		}
		if (cmd->cmd_ext_arqhandle) {
			ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
			cmd->cmd_ext_arqhandle = NULL;
		}
		if (cmd->cmd_ext_arq_buf)
			scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
	}
	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
		kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
	}
	/* Mark freed and return the cmd to its kmem cache. */
	cmd->cmd_flags = CFLAG_FREE;
	kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
}
4020 4019
4021 4020 /*
4022 4021 * tran_sync_pkt(9E) - explicit DMA synchronization
4023 4022 */
4024 4023 /*ARGSUSED*/
4025 4024 static void
4026 4025 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4027 4026 {
4028 4027 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4029 4028
4030 4029 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4031 4030 ap->a_target, (void *)pkt));
4032 4031
4033 4032 if (cmd->cmd_dmahandle) {
4034 4033 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4035 4034 (cmd->cmd_flags & CFLAG_DMASEND) ?
4036 4035 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4037 4036 }
4038 4037 }
4039 4038
4040 4039 /*
4041 4040 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4042 4041 */
4043 4042 /*ARGSUSED*/
4044 4043 static void
4045 4044 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4046 4045 {
4047 4046 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4048 4047 mptsas_t *mpt = ADDR2MPT(ap);
4049 4048
4050 4049 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4051 4050 ap->a_target, (void *)pkt));
4052 4051
4053 4052 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4054 4053 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4055 4054 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4056 4055 }
4057 4056
4058 4057 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4059 4058 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4060 4059 cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4061 4060 }
4062 4061
4063 4062 mptsas_free_extra_sgl_frame(mpt, cmd);
4064 4063 }
4065 4064
4066 4065 static void
4067 4066 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4068 4067 {
4069 4068 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4070 4069 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4071 4070 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4072 4071 DDI_DMA_SYNC_FORCPU);
4073 4072 }
4074 4073 (*pkt->pkt_comp)(pkt);
4075 4074 }
4076 4075
/*
 * Build the MPI2 scatter/gather list for a command from the DMA cookies
 * previously stored in cmd->cmd_sg.  Also sets the read/write direction
 * bit in *control and the DataLength field of the I/O request frame.
 * When all cookies fit in the main request frame they are written there
 * directly; otherwise chain elements are built into the pre-allocated
 * extra frame buffer (cmd->cmd_extra_frames).
 */
static void
mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	uint_t			cookiec;
	mptti_t			*dmap;
	uint32_t		flags;
	pMpi2SGESimple64_t	sge;
	pMpi2SGEChain64_t	sgechain;
	ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));

	/*
	 * Set read/write bit in control.
	 */
	if (cmd->cmd_flags & CFLAG_DMASEND) {
		*control |= MPI2_SCSIIO_CONTROL_WRITE;
	} else {
		*control |= MPI2_SCSIIO_CONTROL_READ;
	}

	ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);

	/*
	 * We have 2 cases here.  First where we can fit all the
	 * SG elements into the main frame, and the case
	 * where we can't.
	 * If we have more cookies than we can attach to a frame
	 * we will need to use a chain element to point
	 * a location of memory where the rest of the S/G
	 * elements reside.
	 */
	if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
		dmap = cmd->cmd_sg;
		sge = (pMpi2SGESimple64_t)(&frame->SGL);
		while (cookiec--) {
			ddi_put32(acc_hdl,
			    &sge->Address.Low, dmap->addr.address64.Low);
			ddi_put32(acc_hdl,
			    &sge->Address.High, dmap->addr.address64.High);
			ddi_put32(acc_hdl, &sge->FlagsLength,
			    dmap->count);
			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
			flags |= ((uint32_t)
			    (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If this is the last cookie, we set the flags
			 * to indicate so
			 */
			if (cookiec == 0) {
				flags |=
				    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
				    | MPI2_SGE_FLAGS_END_OF_BUFFER
				    | MPI2_SGE_FLAGS_END_OF_LIST) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}
	} else {
		/*
		 * Hereby we start to deal with multiple frames.
		 * The process is as follows:
		 * 1. Determine how many frames are needed for SGL element
		 *    storage; Note that all frames are stored in contiguous
		 *    memory space and in 64-bit DMA mode each element is
		 *    3 double-words (12 bytes) long.
		 * 2. Fill up the main frame.  We need to do this separately
		 *    since it contains the SCSI IO request header and needs
		 *    dedicated processing.  Note that the last 4 double-words
		 *    of the SCSI IO header is for SGL element storage
		 *    (MPI2_SGE_IO_UNION).
		 * 3. Fill the chain element in the main frame, so the DMA
		 *    engine can use the following frames.
		 * 4. Enter a loop to fill the remaining frames.  Note that the
		 *    last frame contains no chain element.  The remaining
		 *    frames go into the mpt SGL buffer allocated on the fly,
		 *    not immediately following the main message frame, as in
		 *    Gen1.
		 * Some restrictions:
		 * 1. For 64-bit DMA, the simple element and chain element
		 *    are both of 3 double-words (12 bytes) in size, even
		 *    though all frames are stored in the first 4G of mem
		 *    range and the higher 32-bits of the address are always 0.
		 * 2. On some controllers (like the 1064/1068), a frame can
		 *    hold SGL elements with the last 1 or 2 double-words
		 *    (4 or 8 bytes) un-used.  On these controllers, we should
		 *    recognize that there's not enough room for another SGL
		 *    element and move the sge pointer to the next frame.
		 */
		int		i, j, k, l, frames, sgemax;
		int		temp;
		uint8_t		chainflags;
		uint16_t	chainlength;
		mptsas_cache_frames_t *p;

		/*
		 * Sgemax is the number of SGE's that will fit
		 * each extra frame and frames is total
		 * number of frames we'll need.  1 sge entry per
		 * frame is reseverd for the chain element thus the -1 below.
		 */
		sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
		    - 1);
		temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

		/*
		 * A little check to see if we need to round up the number
		 * of frames we need
		 */
		if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
		    sgemax) > 1) {
			frames = (temp + 1);
		} else {
			frames = temp;
		}
		dmap = cmd->cmd_sg;
		sge = (pMpi2SGESimple64_t)(&frame->SGL);

		/*
		 * First fill in the main frame
		 */
		for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
			ddi_put32(acc_hdl, &sge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(acc_hdl, &sge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
			flags = ddi_get32(acc_hdl, &sge->FlagsLength);
			flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If this is the last SGE of this frame
			 * we set the end of list flag
			 */
			if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |=
				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |=
				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(acc_hdl, &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}

		/*
		 * Fill in the chain element in the main frame.
		 * About calculation on ChainOffset:
		 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
		 *    in the end reserved for SGL element storage
		 *    (MPI2_SGE_IO_UNION); we should count it in our
		 *    calculation.  See its definition in the header file.
		 * 2. Constant j is the counter of the current SGL element
		 *    that will be processed, and (j - 1) is the number of
		 *    SGL elements that have been processed (stored in the
		 *    main frame).
		 * 3. ChainOffset value should be in units of double-words (4
		 *    bytes) so the last value should be divided by 4.
		 */
		ddi_put8(acc_hdl, &frame->ChainOffset,
		    (sizeof (MPI2_SCSI_IO_REQUEST) -
		    sizeof (MPI2_SGE_IO_UNION) +
		    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
		sgechain = (pMpi2SGEChain64_t)sge;
		chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
		ddi_put8(acc_hdl, &sgechain->Flags, chainflags);

		/*
		 * The size of the next frame is the accurate size of space
		 * (in bytes) used to store the SGL elements. j is the counter
		 * of SGL elements. (j - 1) is the number of SGL elements that
		 * have been processed (stored in frames).
		 */
		if (frames >= 2) {
			chainlength = mpt->m_req_frame_size /
			    sizeof (MPI2_SGE_SIMPLE64) *
			    sizeof (MPI2_SGE_SIMPLE64);
		} else {
			chainlength = ((cookiec - (j - 1)) *
			    sizeof (MPI2_SGE_SIMPLE64));
		}

		p = cmd->cmd_extra_frames;

		ddi_put16(acc_hdl, &sgechain->Length, chainlength);
		ddi_put32(acc_hdl, &sgechain->Address.Low,
		    p->m_phys_addr);
		/* SGL is allocated in the first 4G mem range */
		ddi_put32(acc_hdl, &sgechain->Address.High, 0);

		/*
		 * If there are more than 2 frames left we have to
		 * fill in the next chain offset to the location of
		 * the chain element in the next frame.
		 * sgemax is the number of simple elements in an extra
		 * frame.  Note that the value NextChainOffset should be
		 * in double-words (4 bytes).
		 */
		if (frames >= 2) {
			ddi_put8(acc_hdl, &sgechain->NextChainOffset,
			    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
		} else {
			ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
		}

		/*
		 * Jump to next frame;
		 * Starting here, chain buffers go into the per command SGL.
		 * This buffer is allocated when chain buffers are needed.
		 */
		sge = (pMpi2SGESimple64_t)p->m_frames_addr;
		i = cookiec;

		/*
		 * Start filling in frames with SGE's.  If we
		 * reach the end of frame and still have SGE's
		 * to fill we need to add a chain element and
		 * use another frame.  j will be our counter
		 * for what cookie we are at and i will be
		 * the total cookiec. k is the current frame
		 */
		for (k = 1; k <= frames; k++) {
			for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

				/*
				 * If we have reached the end of frame
				 * and we have more SGE's to fill in
				 * we have to fill the final entry
				 * with a chain element and then
				 * continue to the next frame
				 */
				if ((l == (sgemax + 1)) && (k != frames)) {
					sgechain = (pMpi2SGEChain64_t)sge;
					/*
					 * The chain element consumed this
					 * slot, so this cookie has not been
					 * stored yet; back j up so it is
					 * re-processed in the next frame.
					 */
					j--;
					chainflags = (
					    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
					    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
					    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
					ddi_put8(p->m_acc_hdl,
					    &sgechain->Flags, chainflags);
					/*
					 * k is the frame counter and (k + 1)
					 * is the number of the next frame.
					 * Note that frames are in contiguous
					 * memory space.
					 */
					ddi_put32(p->m_acc_hdl,
					    &sgechain->Address.Low,
					    (p->m_phys_addr +
					    (mpt->m_req_frame_size * k)));
					ddi_put32(p->m_acc_hdl,
					    &sgechain->Address.High, 0);

					/*
					 * If there are more than 2 frames left
					 * we have to next chain offset to
					 * the location of the chain element
					 * in the next frame and fill in the
					 * length of the next chain
					 */
					if ((frames - k) >= 2) {
						ddi_put8(p->m_acc_hdl,
						    &sgechain->NextChainOffset,
						    (sgemax *
						    sizeof (MPI2_SGE_SIMPLE64))
						    >> 2);
						ddi_put16(p->m_acc_hdl,
						    &sgechain->Length,
						    mpt->m_req_frame_size /
						    sizeof (MPI2_SGE_SIMPLE64) *
						    sizeof (MPI2_SGE_SIMPLE64));
					} else {
						/*
						 * This is the last frame. Set
						 * the NextChainOffset to 0 and
						 * Length is the total size of
						 * all remaining simple elements
						 */
						ddi_put8(p->m_acc_hdl,
						    &sgechain->NextChainOffset,
						    0);
						ddi_put16(p->m_acc_hdl,
						    &sgechain->Length,
						    (cookiec - j) *
						    sizeof (MPI2_SGE_SIMPLE64));
					}

					/* Jump to the next frame */
					sge = (pMpi2SGESimple64_t)
					    ((char *)p->m_frames_addr +
					    (int)mpt->m_req_frame_size * k);

					continue;
				}

				ddi_put32(p->m_acc_hdl,
				    &sge->Address.Low,
				    dmap->addr.address64.Low);
				ddi_put32(p->m_acc_hdl,
				    &sge->Address.High,
				    dmap->addr.address64.High);
				ddi_put32(p->m_acc_hdl,
				    &sge->FlagsLength, dmap->count);
				flags = ddi_get32(p->m_acc_hdl,
				    &sge->FlagsLength);
				flags |= ((uint32_t)(
				    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
				    MPI2_SGE_FLAGS_SHIFT);

				/*
				 * If we are at the end of the frame and
				 * there is another frame to fill in
				 * we set the last simple element as last
				 * element
				 */
				if ((l == sgemax) && (k != frames)) {
					flags |= ((uint32_t)
					    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
					    MPI2_SGE_FLAGS_SHIFT);
				}

				/*
				 * If this is the final cookie we
				 * indicate it by setting the flags
				 */
				if (j == i) {
					flags |= ((uint32_t)
					    (MPI2_SGE_FLAGS_LAST_ELEMENT |
					    MPI2_SGE_FLAGS_END_OF_BUFFER |
					    MPI2_SGE_FLAGS_END_OF_LIST) <<
					    MPI2_SGE_FLAGS_SHIFT);
				}
				if (cmd->cmd_flags & CFLAG_DMASEND) {
					flags |=
					    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
					    MPI2_SGE_FLAGS_SHIFT);
				} else {
					flags |=
					    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
					    MPI2_SGE_FLAGS_SHIFT);
				}
				ddi_put32(p->m_acc_hdl,
				    &sge->FlagsLength, flags);
				dmap++;
				sge++;
			}
		}

		/*
		 * Sync DMA with the chain buffers that were just created
		 */
		(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	}
}
4466 4465
/*
 * Interrupt handling
 * Utility routine.  Poll for status of a command sent to HBA
 * without interrupts (a FLAG_NOINTR command).
 *
 * Returns TRUE if the command finished within the allotted polling
 * time, FALSE if it did not (in which case the command is marked as a
 * timeout and removed from the wait queue if it was still queued).
 */
int
mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
{
	int rval = TRUE;

	NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));

	/*
	 * In order to avoid using m_mutex in ISR(a new separate mutex
	 * m_intr_mutex is introduced) and keep the same lock logic,
	 * the m_intr_mutex should be used to protect the getting and
	 * setting of the ReplyDescriptorIndex.
	 *
	 * Since the m_intr_mutex would be released during processing the poll
	 * cmd, so we should set the poll flag earlier here to make sure the
	 * polled cmd be handled in this thread/context. A side effect is other
	 * cmds during the period between the flag set and reset are also
	 * handled in this thread and not the ISR. Since the poll cmd is not
	 * so common, so the performance degradation in this case is not a big
	 * issue.
	 */
	mutex_enter(&mpt->m_intr_mutex);
	mpt->m_polled_intr = 1;
	mutex_exit(&mpt->m_intr_mutex);

	/* Task-management commands are issued elsewhere; don't restart. */
	if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
		mptsas_restart_hba(mpt);
	}

	/*
	 * Wait, using drv_usecwait(), long enough for the command to
	 * reasonably return from the target if the target isn't
	 * "dead".  A polled command may well be sent from scsi_poll, and
	 * there are retries built in to scsi_poll if the transport
	 * accepted the packet (TRAN_ACCEPT).  scsi_poll waits 1 second
	 * and retries the transport up to scsi_poll_busycnt times
	 * (currently 60) if
	 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
	 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
	 *
	 * limit the waiting to avoid a hang in the event that the
	 * cmd never gets started but we are still receiving interrupts
	 */
	while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
		if (mptsas_wait_intr(mpt, polltime) == FALSE) {
			NDBG5(("mptsas_poll: command incomplete"));
			rval = FALSE;
			break;
		}
	}

	/* Polling done; hand interrupt processing back to the ISR. */
	mutex_enter(&mpt->m_intr_mutex);
	mpt->m_polled_intr = 0;
	mutex_exit(&mpt->m_intr_mutex);

	if (rval == FALSE) {

		/*
		 * this isn't supposed to happen, the hba must be wedged
		 * Mark this cmd as a timeout.
		 */
		mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
		    (STAT_TIMEOUT|STAT_ABORTED));

		if (poll_cmd->cmd_queued == FALSE) {

			NDBG5(("mptsas_poll: not on waitq"));

			/* The cmd was started, so reflect that in pkt_state. */
			poll_cmd->cmd_pkt->pkt_state |=
			    (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
		} else {

			/* find and remove it from the waitq */
			NDBG5(("mptsas_poll: delete from waitq"));
			mptsas_waitq_delete(mpt, poll_cmd);
		}

	}
	mptsas_fma_check(mpt, poll_cmd);
	NDBG5(("mptsas_poll: done"));
	return (rval);
}
4554 4553
4555 4554 /*
4556 4555 * Used for polling cmds and TM function
4557 4556 */
static int
mptsas_wait_intr(mptsas_t *mpt, int polltime)
{
	int				cnt;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	Mpi2ReplyDescriptorsUnion_t	reply_desc_union_v;
	uint32_t			int_mask;
	uint8_t				reply_type;

	NDBG5(("mptsas_wait_intr"));

	/*
	 * Get the current interrupt mask and disable interrupts. When
	 * re-enabling ints, set mask to saved value.
	 */
	int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Keep polling for up to polltime milliseconds: each pass of the
	 * loop below waits 1000 microseconds before retrying.
	 */
	for (cnt = 0; cnt < polltime; cnt++) {
		mutex_enter(&mpt->m_intr_mutex);
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		/*
		 * An all-ones descriptor means the slot is still empty
		 * (slots are reset to 0xFF..FF below after consumption),
		 * so drop the lock, wait 1ms and try again.
		 */
		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			mutex_exit(&mpt->m_intr_mutex);
			drv_usecwait(1000);
			continue;
		}

		/*
		 * Copy the descriptor contents into the local
		 * reply_desc_union_v so processing can happen after the
		 * queue slot is recycled and m_intr_mutex is dropped.
		 */
		reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Default.ReplyFlags);
		reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		reply_desc_union_v.Default.ReplyFlags = reply_type;
		if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
			reply_desc_union_v.SCSIIOSuccess.SMID =
			    ddi_get16(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->SCSIIOSuccess.SMID);
		} else if (reply_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply_desc_union_v.AddressReply.ReplyFrameAddress =
			    ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->AddressReply.ReplyFrameAddress);
			reply_desc_union_v.AddressReply.SMID =
			    ddi_get16(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->AddressReply.SMID);
		}
		/*
		 * Clear the reply descriptor for re-use and increment
		 * index.
		 */
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
		    0xFFFFFFFFFFFFFFFF);
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}

		/*
		 * Update the global reply index
		 */
		ddi_put32(mpt->m_datap,
		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		mutex_exit(&mpt->m_intr_mutex);

		/*
		 * The reply is valid, process it according to its
		 * type.
		 */
		mptsas_process_intr(mpt, &reply_desc_union_v);


		/*
		 * Re-enable interrupts and quit.
		 */
		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
		    int_mask);
		return (TRUE);

	}

	/*
	 * Timed out without seeing a reply: restore the saved interrupt
	 * mask and report failure.
	 */
	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
	return (FALSE);
}
4657 4656
/*
 * For fastpath, the m_intr_mutex should be held from the beginning to the
 * end, so only cmds that never need to release m_intr_mutex (even just for
 * a moment) are candidates for fast processing. Otherwise, we don't handle
 * them here and just return; in the ISR, those cmds will then be handled
 * later with m_mutex held and m_intr_mutex not held.
 */
/*
 * Fast-path completion for a successful SCSI IO context reply, run with
 * m_intr_mutex held. Returns TRUE when the command was fully handled
 * here (or the reply was bogus), FALSE when the cmd is an IOC/passthru
 * command that must be deferred to mptsas_process_intr() under m_mutex.
 */
static int
mptsas_handle_io_fastpath(mptsas_t *mpt,
    uint16_t SMID)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd = NULL;
	struct scsi_pkt	*pkt;

	/*
	 * This is a success reply so just complete the IO. First, do a sanity
	 * check on the SMID. The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_slots)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (TRUE);
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return (TRUE);
	}

	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/*
	 * If the cmd is an IOC cmd or a passthrough, then we don't process it
	 * in fastpath; later it will be handled by mptsas_process_intr()
	 * with m_mutex protection.
	 */
	if (cmd->cmd_flags & (CFLAG_PASSTHRU | CFLAG_CMDIOC)) {
		return (FALSE);
	} else {
		mptsas_remove_cmd0(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add0(mpt, cmd);
	}

	/*
	 * In fastpath, the cmd should only be a context reply, so just
	 * checking the post queue of the reply descriptor and the dmahandle
	 * of the cmd is enough. No sense data in this case and no need to
	 * check the dma handle where sense data dma info is saved, the dma
	 * handle of the reply frame, or the dma handle of the reply free
	 * queue.
	 * For the dma handle of the request queue: check FMA here since we
	 * are sure the request must have already been sent/DMAed correctly;
	 * otherwise checking in mptsas_scsi_start() is not correct since
	 * at that time the dma may not have started.
	 */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}

	return (TRUE);
}
4768 4767
4769 4768 static void
4770 4769 mptsas_handle_scsi_io_success(mptsas_t *mpt,
4771 4770 pMpi2ReplyDescriptorsUnion_t reply_desc)
4772 4771 {
4773 4772 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
4774 4773 uint16_t SMID;
4775 4774 mptsas_slots_t *slots = mpt->m_active;
4776 4775 mptsas_cmd_t *cmd = NULL;
4777 4776 struct scsi_pkt *pkt;
4778 4777
4779 4778 scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
4780 4779 SMID = scsi_io_success->SMID;
4781 4780
4782 4781 /*
4783 4782 * This is a success reply so just complete the IO. First, do a sanity
4784 4783 * check on the SMID. The final slot is used for TM requests, which
4785 4784 * would not come into this reply handler.
4786 4785 */
4787 4786 if ((SMID == 0) || (SMID > slots->m_n_slots)) {
4788 4787 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4789 4788 SMID);
4790 4789 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4791 4790 return;
4792 4791 }
4793 4792
4794 4793 cmd = slots->m_slot[SMID];
4795 4794
4796 4795 /*
4797 4796 * print warning and return if the slot is empty
4798 4797 */
4799 4798 if (cmd == NULL) {
4800 4799 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
4801 4800 "in slot %d", SMID);
4802 4801 return;
4803 4802 }
4804 4803
4805 4804 pkt = CMD2PKT(cmd);
4806 4805 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
4807 4806 STATE_GOT_STATUS);
4808 4807 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4809 4808 pkt->pkt_state |= STATE_XFERRED_DATA;
4810 4809 }
4811 4810 pkt->pkt_resid = 0;
4812 4811
4813 4812 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
4814 4813 cmd->cmd_flags |= CFLAG_FINISHED;
4815 4814 cv_broadcast(&mpt->m_passthru_cv);
4816 4815 return;
4817 4816 } else {
4818 4817 mptsas_remove_cmd(mpt, cmd);
4819 4818 }
4820 4819
4821 4820 if (cmd->cmd_flags & CFLAG_RETRY) {
4822 4821 /*
4823 4822 * The target returned QFULL or busy, do not add tihs
4824 4823 * pkt to the doneq since the hba will retry
4825 4824 * this cmd.
4826 4825 *
4827 4826 * The pkt has already been resubmitted in
4828 4827 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4829 4828 * Remove this cmd_flag here.
4830 4829 */
4831 4830 cmd->cmd_flags &= ~CFLAG_RETRY;
4832 4831 } else {
4833 4832 mptsas_doneq_add(mpt, cmd);
4834 4833 }
4835 4834 }
4836 4835
/*
 * Handle an Address Reply descriptor: locate the reply frame in host
 * memory, dispatch on the MPI function it completes, then return the
 * frame to the reply free queue unless a waiter takes ownership of it
 * (passthrough/config/FW-diag/TM store it in cmd_rfm). Called with
 * m_mutex held.
 */
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2AddressReplyDescriptor_t	address_reply;
	pMPI2DefaultReply_t		reply;
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint32_t			reply_addr;
	uint16_t			SMID, iocstatus;
	mptsas_slots_t			*slots = mpt->m_active;
	mptsas_cmd_t			*cmd = NULL;
	uint8_t				function, buffer_type;
	m_replyh_arg_t			*args;
	int				reply_frame_no;

	ASSERT(mutex_owned(&mpt->m_mutex));

	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;

	reply_addr = address_reply->ReplyFrameAddress;
	SMID = address_reply->SMID;
	/*
	 * If reply frame is not in the proper range we should ignore this
	 * message and exit the interrupt handler.
	 */
	if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
	    (reply_addr >= (mpt->m_reply_frame_dma_addr +
	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
	    ((reply_addr - mpt->m_reply_frame_dma_addr) %
	    mpt->m_reply_frame_size != 0)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
		    "address 0x%x\n", reply_addr);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
	    mpt->m_reply_frame_dma_addr));
	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

	/*
	 * don't get slot information and command for events since these values
	 * don't exist
	 */
	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
		/*
		 * This could be a TM reply, which uses the last allocated
		 * SMID, so allow for that.
		 */
		if ((SMID == 0) || (SMID > (slots->m_n_slots + 1))) {
			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
			    "%d\n", SMID);
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			return;
		}

		cmd = slots->m_slot[SMID];

		/*
		 * print warning and return if the slot is empty
		 */
		if (cmd == NULL) {
			mptsas_log(mpt, CE_WARN, "?NULL command for address "
			    "reply in slot %d", SMID);
			return;
		}
		/*
		 * Passthrough/config/FW-diag commands are completed by
		 * waking the waiting thread, which takes ownership of the
		 * reply frame (cmd_rfm) and returns it to the free queue.
		 */
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
			return;
		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
			mptsas_remove_cmd(mpt, cmd);
		}
		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
	}
	/*
	 * Depending on the function, we need to handle
	 * the reply frame (and cmd) differently.
	 */
	switch (function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		cmd->cmd_rfm = reply_addr;
		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
		    cmd);
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_signal(&mpt->m_fw_cv);
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
		    mpt->m_reply_frame_size;
		args = &mpt->m_replyh_args[reply_frame_no];
		args->mpt = (void *)mpt;
		args->rfm = reply_addr;

		/*
		 * Record the event if its type is enabled in
		 * this mpt instance by ioctl.
		 */
		mptsas_record_event(args);

		/*
		 * Handle time critical events
		 * NOT_RESPONDING/ADDED only now
		 */
		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
			/*
			 * Would not return main process,
			 * just let taskq resolve ack action
			 * and ack would be sent in taskq thread
			 */
			NDBG20(("send mptsas_handle_event_sync success"));
		}
		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "No memory available"
			    "for dispatch taskq");
			/*
			 * Return the reply frame to the free queue.
			 */
			ddi_put32(mpt->m_acc_free_queue_hdl,
			    &((uint32_t *)(void *)
			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
				mpt->m_free_index = 0;
			}

			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
		}
		return;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		/*
		 * If SMID is 0, this implies that the reply is due to a
		 * release function with a status that the buffer has been
		 * released. Set the buffer flags accordingly.
		 */
		if (SMID == 0) {
			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &reply->IOCStatus);
			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
				pBuffer =
				    &mpt->m_fw_diag_buffer_list[buffer_type];
				pBuffer->valid_data = TRUE;
				pBuffer->owned_by_firmware = FALSE;
				pBuffer->immediate = FALSE;
			}
		} else {
			/*
			 * Normal handling of diag post reply with SMID.
			 */
			cmd = slots->m_slot[SMID];

			/*
			 * print warning and return if the slot is empty
			 */
			if (cmd == NULL) {
				mptsas_log(mpt, CE_WARN, "?NULL command for "
				    "address reply in slot %d", SMID);
				return;
			}
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_fw_diag_cv);
		}
		return;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
		break;
	}

	/*
	 * Return the reply frame to the free queue.
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
	    reply_addr);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);

	/* FW commands stay in their slot; their originator completes them. */
	if (cmd->cmd_flags & CFLAG_FW_CMD)
		return;

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5057 5056
/*
 * Decode an error-path SCSI IO reply frame (IOC status, SCSI status,
 * SCSI state, transfer counts, autosense) and translate it into
 * pkt_reason/pkt_state/pkt_resid and, for check conditions, a filled-in
 * scsi_arq_status. Called with m_mutex held.
 */
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
	uint8_t			scsi_status, scsi_state;
	uint16_t		ioc_status;
	uint32_t		xferred, sensecount, responsedata, loginfo = 0;
	struct scsi_pkt		*pkt;
	struct scsi_arq_status	*arqstat;
	struct buf		*bp;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint8_t			*sensedata = NULL;

	/* Pick the external ARQ buffer when one was allocated for this cmd. */
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		bp = cmd->cmd_ext_arq_buf;
	} else {
		bp = cmd->cmd_arq_buf;
	}

	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
	    &reply->ResponseInfo);

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		mptsas_log(mpt, CE_NOTE,
		    "?Log info 0x%x received for target %d.\n"
		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
		    loginfo, Tgt(cmd), scsi_status, ioc_status,
		    scsi_state);
	}

	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
	    scsi_status, ioc_status, scsi_state));

	pkt = CMD2PKT(cmd);
	*(pkt->pkt_scbp) = scsi_status;

	if (loginfo == 0x31170000) {
		/*
		 * If loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
		 * 0x31170000 comes, that means the device missing delay
		 * is in progress; the command needs to be retried later.
		 * STATUS_BUSY makes scsi_poll/sd retry it.
		 */
		*(pkt->pkt_scbp) = STATUS_BUSY;
		return;
	}

	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state |= STATE_GOT_BUS;
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		if (ptgt->m_reset_delay == 0) {
			mptsas_set_throttle(mpt, ptgt,
			    DRAIN_THROTTLE);
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		return;
	}

	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		/* Only the low byte carries the SCSI response code. */
		responsedata &= 0x000000FF;
		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
			mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
			pkt->pkt_reason = CMD_TLR_OFF;
			return;
		}
	}


	switch (scsi_status) {
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
		arqstat = (void*)(pkt->pkt_scbp);
		arqstat->sts_rqpkt_status = *((struct scsi_status *)
		    (pkt->pkt_scbp));
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
		if (cmd->cmd_flags & CFLAG_XARQ) {
			pkt->pkt_state |= STATE_XARQ_DONE;
		}
		if (pkt->pkt_resid != cmd->cmd_dmacount) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
		arqstat->sts_rqpkt_state  = pkt->pkt_state;
		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
		sensedata = (uint8_t *)&arqstat->sts_sensedata;

		/* Copy at most cmd_rqslen bytes of autosense data. */
		bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
		    ((cmd->cmd_rqslen >= sensecount) ? sensecount :
		    cmd->cmd_rqslen));
		arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
		cmd->cmd_flags |= CFLAG_CMDARQ;
		/*
		 * Set proper status for pkt if autosense was valid
		 */
		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			struct scsi_status zero_status = { 0 };
			arqstat->sts_rqpkt_status = zero_status;
		}

		/*
		 * ASC=0x47 is parity error
		 * ASC=0x48 is initiator detected error received
		 */
		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
		    ((scsi_sense_asc(sensedata) == 0x47) ||
		    (scsi_sense_asc(sensedata) == 0x48))) {
			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
		}

		/*
		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
		 * ASC/ASCQ=0x25/0x00 means invalid lun
		 */
		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
		    (scsi_sense_asc(sensedata) == 0x3F) &&
		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
		    (scsi_sense_asc(sensedata) == 0x25) &&
		    (scsi_sense_ascq(sensedata) == 0x00))) {
			mptsas_topo_change_list_t *topo_node = NULL;

			topo_node = kmem_zalloc(
			    sizeof (mptsas_topo_change_list_t),
			    KM_NOSLEEP);
			if (topo_node == NULL) {
				mptsas_log(mpt, CE_NOTE, "No memory"
				    "resource for handle SAS dynamic"
				    "reconfigure.\n");
				break;
			}
			topo_node->mpt = mpt;
			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
			topo_node->un.phymask = ptgt->m_phymask;
			topo_node->devhdl = ptgt->m_devhdl;
			topo_node->object = (void *)ptgt;
			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr,
			    (void *)topo_node,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
				    "for handle SAS dynamic reconfigure"
				    "failed. \n");
			}
		}
		break;
	case MPI2_SCSI_STATUS_GOOD:
		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state |= STATE_GOT_BUS;
			mutex_enter(&ptgt->m_tgt_intr_mutex);
			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			mutex_exit(&ptgt->m_tgt_intr_mutex);
			NDBG31(("lost disk for target%d, command:%x",
			    Tgt(cmd), pkt->pkt_cdbp[0]));
			break;
		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
			NDBG31(("data overrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_reason = CMD_DATA_OVR;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS
			    | STATE_XFERRED_DATA);
			pkt->pkt_resid = 0;
			break;
		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
			NDBG31(("data underrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS);
			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
			if (pkt->pkt_resid != cmd->cmd_dmacount) {
				pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			break;
		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_BUS_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_DEV_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_TERMINATED, STAT_TERMINATED);
			break;
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		case MPI2_IOCSTATUS_BUSY:
			/*
			 * set throttles to drain
			 */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mutex_enter(&ptgt->m_tgt_intr_mutex);
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
				mutex_exit(&ptgt->m_tgt_intr_mutex);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}

			/*
			 * retry command
			 */
			cmd->cmd_flags |= CFLAG_RETRY;
			cmd->cmd_pkt_flags |= FLAG_HEAD;

			/* mptsas_accept_pkt() must be called without m_mutex */
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_accept_pkt(mpt, cmd);
			mutex_enter(&mpt->m_mutex);
			break;
		default:
			mptsas_log(mpt, CE_WARN,
			    "unknown ioc_status = %x\n", ioc_status);
			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
			    "count = %x, scsi_status = %x", scsi_state,
			    xferred, scsi_status);
			break;
		}
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		mptsas_handle_qfull(mpt, cmd);
		break;
	case MPI2_SCSI_STATUS_BUSY:
		NDBG31(("scsi_status busy received"));
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		NDBG31(("scsi_status reservation conflict received"));
		break;
	default:
		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
		    scsi_status, ioc_status);
		mptsas_log(mpt, CE_WARN,
		    "mptsas_process_intr: invalid scsi status\n");
		break;
	}
}
5317 5316
5318 5317 static void
5319 5318 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5320 5319 mptsas_cmd_t *cmd)
5321 5320 {
5322 5321 uint8_t task_type;
5323 5322 uint16_t ioc_status;
5324 5323 uint32_t log_info;
5325 5324 uint16_t dev_handle;
5326 5325 struct scsi_pkt *pkt = CMD2PKT(cmd);
5327 5326
5328 5327 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5329 5328 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5330 5329 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5331 5330 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5332 5331
5333 5332 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5334 5333 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5335 5334 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5336 5335 task_type, ioc_status, log_info, dev_handle);
5337 5336 pkt->pkt_reason = CMD_INCOMPLETE;
5338 5337 return;
5339 5338 }
5340 5339
5341 5340 switch (task_type) {
5342 5341 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5343 5342 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5344 5343 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5345 5344 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5346 5345 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5347 5346 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5348 5347 break;
5349 5348 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5350 5349 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5351 5350 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5352 5351 /*
5353 5352 * Check for invalid DevHandle of 0 in case application
5354 5353 * sends bad command. DevHandle of 0 could cause problems.
5355 5354 */
5356 5355 if (dev_handle == 0) {
5357 5356 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5358 5357 " DevHandle of 0.");
5359 5358 } else {
5360 5359 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5361 5360 task_type);
5362 5361 }
5363 5362 break;
5364 5363 default:
5365 5364 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5366 5365 task_type);
5367 5366 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5368 5367 break;
5369 5368 }
5370 5369 }
5371 5370
/*
 * Body of a done-queue helper thread.  Each helper drains its own
 * per-thread completion list (mpt->m_doneq_thread_id[t]), invoking the
 * target driver completion callback for each command, until the
 * MPTSAS_DONEQ_THREAD_ACTIVE flag is cleared.  On exit it decrements
 * the active-thread count and wakes any waiter (e.g. detach) blocked on
 * m_doneq_thread_cv.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t		*mpt = arg->mpt;
	uint64_t		t = arg->t;
	mptsas_cmd_t		*cmd;
	struct scsi_pkt		*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		/* Sleep until work is queued (or we're told to exit). */
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Drop the list lock while calling back into the target
		 * driver; the callback may take arbitrary time and must
		 * not be invoked with our mutex held.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Thread is exiting: update the count and notify any waiter. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5403 5402
/*
 * mpt interrupt handler.
 *
 * Drains the reply descriptor post queue: read/write I/O completions with
 * context reply are finished in a fast path (mptsas_handle_io_fastpath());
 * everything else is coalesced into mpt->m_reply[] and processed afterwards
 * under m_mutex via mptsas_process_intr().  Returns DDI_INTR_CLAIMED /
 * DDI_INTR_UNCLAIMED as appropriate for a shared interrupt line.
 */
static uint_t
mptsas_intr(caddr_t arg1, caddr_t arg2)
{
	mptsas_t			*mpt = (void *)arg1;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	uchar_t				did_reply = FALSE;
	int				i = 0, j;
	uint8_t				reply_type;
	uint16_t			SMID;

	NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));

	/*
	 * 1.
	 * To avoid using m_mutex in the ISR(ISR refers not only mptsas_intr,
	 * but all of the recursively called functions in it. the same below),
	 * separate mutexes are introduced to protect the elements shown in ISR.
	 * 3 types of mutex are involved here:
	 * a)per instance mutex m_intr_mutex.
	 * b)per target mutex m_tgt_intr_mutex.
	 * c)mutex that protects the free slot.
	 *
	 * a)per instance mutex m_intr_mutex:
	 * used to protect m_options, m_power, m_waitq, etc that would be
	 * checked/modified in ISR; protect the getting and setting the reply
	 * descriptor index; protect the m_slots[];
	 *
	 * b)per target mutex m_tgt_intr_mutex:
	 * used to protect per target element which has relationship to ISR.
	 * contention for the new per target mutex is just as high as it in
	 * sd(7d) driver.
	 *
	 * c)mutexes that protect the free slots:
	 * those mutexes are introduced to minimize the mutex contentions
	 * between the IO request threads where free slots are allocated
	 * for sending cmds and ISR where slots holding outstanding cmds
	 * are returned to the free pool.
	 * the idea is like this:
	 * 1) Partition all of the free slot into NCPU groups. For example,
	 * In system where we have 15 slots, and 4 CPU, then slot s1,s5,s9,s13
	 * are marked belonging to CPU1, s2,s6,s10,s14 to CPU2, s3,s7,s11,s15
	 * to CPU3, and s4,s8,s12 to CPU4.
	 * 2) In each of the group, an alloc/release queue pair is created,
	 * and both the allocq and the releaseq have a dedicated mutex.
	 * 3) When init, all of the slots in a CPU group are inserted into the
	 * allocq of its CPU's pair.
	 * 4) When doing IO,
	 * mptsas_scsi_start()
	 * {
	 *	cpuid = the cpu NO of the cpu where this thread is running on
	 * retry:
	 *	mutex_enter(&allocq[cpuid]);
	 *	if (get free slot = success) {
	 *		remove the slot from the allocq
	 *		mutex_exit(&allocq[cpuid]);
	 *		return(success);
	 *	} else { // exchange allocq and releaseq and try again
	 *		mutex_enter(&releq[cpuid]);
	 *		exchange the allocq and releaseq of this pair;
	 *		mutex_exit(&releq[cpuid]);
	 *		if (try to get free slot again = success) {
	 *			remove the slot from the allocq
	 *			mutex_exit(&allocq[cpuid]);
	 *			return(success);
	 *		} else {
	 *			MOD(cpuid)++;
	 *			goto retry;
	 *			if (all CPU groups tried)
	 *				mutex_exit(&allocq[cpuid]);
	 *				return(failure);
	 *		}
	 *	}
	 * }
	 * ISR()
	 * {
	 *		cpuid = the CPU group id where the slot sending the
	 *		cmd belongs;
	 *		mutex_enter(&releq[cpuid]);
	 *		remove the slot from the releaseq
	 *		mutex_exit(&releq[cpuid]);
	 * }
	 * This way, only when the queue pair doing exchange have mutex
	 * contentions.
	 *
	 * For mutex m_intr_mutex and m_tgt_intr_mutex, there are 2 scenarios:
	 *
	 * a)If the elements are only checked but not modified in the ISR, then
	 * only the places where those elements are modified(outside of ISR)
	 * need to be protected by the new introduced mutex.
	 * For example, data A is only read/checked in ISR, then we need do
	 * like this:
	 * In ISR:
	 * {
	 *	mutex_enter(&new_mutex);
	 *	read(A);
	 *	mutex_exit(&new_mutex);
	 *	//the new_mutex here is either the m_tgt_intr_mutex or
	 *	//the m_intr_mutex.
	 * }
	 * In non-ISR
	 * {
	 *	mutex_enter(&m_mutex); //the stock driver already did this
	 *	mutex_enter(&new_mutex);
	 *	write(A);
	 *	mutex_exit(&new_mutex);
	 *	mutex_exit(&m_mutex); //the stock driver already did this
	 *
	 *	read(A);
	 *	// read(A) in non-ISR is not required to be protected by new
	 *	// mutex since 'A' has already been protected by m_mutex
	 *	// outside of the ISR
	 * }
	 *
	 * Those fields in mptsas_target_t/ptgt which are only read in ISR
	 * fall into this category. So they, together with the fields which
	 * are never read in ISR, are not necessary to be protected by
	 * m_tgt_intr_mutex, don't bother.
	 * checking of m_waitq also falls into this category. so all of the
	 * place outside of ISR where the m_waitq is modified, such as in
	 * mptsas_waitq_add(), mptsas_waitq_delete(), mptsas_waitq_rm(),
	 * m_intr_mutex should be used.
	 *
	 * b)If the elements are modified in the ISR, then each place where
	 * those elements are referred(outside of ISR) need to be protected
	 * by the new introduced mutex. Of course, if those elements only
	 * appear in the non-key code path, that is, they don't affect
	 * performance, then the m_mutex can still be used as before.
	 * For example, data B is modified in key code path in ISR, and data C
	 * is modified in non-key code path in ISR, then we can do like this:
	 * In ISR:
	 * {
	 *	mutex_enter(&new_mutex);
	 *	write(B);
	 *	mutex_exit(&new_mutex);
	 *	if (seldom happen) {
	 *		mutex_enter(&m_mutex);
	 *		write(C);
	 *		mutex_exit(&m_mutex);
	 *	}
	 *	//the new_mutex here is either the m_tgt_intr_mutex or
	 *	//the m_intr_mutex.
	 * }
	 * In non-ISR
	 * {
	 *	mutex_enter(&new_mutex);
	 *	write(B);
	 *	mutex_exit(&new_mutex);
	 *
	 *	mutex_enter(&new_mutex);
	 *	read(B);
	 *	mutex_exit(&new_mutex);
	 *	// both write(B) and read(B) in non-ISR is required to be
	 *	// protected by new mutex outside of the ISR
	 *
	 *	mutex_enter(&m_mutex); //the stock driver already did this
	 *	read(C);
	 *	write(C);
	 *	mutex_exit(&m_mutex); //the stock driver already did this
	 *	// both write(C) and read(C) in non-ISR have been already
	 *	// been protected by m_mutex outside of the ISR
	 * }
	 *
	 * For example, ptgt->m_t_ncmds fall into 'B' of this category, and
	 * elements shown in address reply, restart_hba, passthrough, IOC
	 * fall into 'C' of this category.
	 *
	 * In any case where mutexes are nested, make sure in the following
	 * order:
	 *	m_mutex -> m_intr_mutex -> m_tgt_intr_mutex
	 *	m_intr_mutex -> m_tgt_intr_mutex
	 *	m_mutex -> m_intr_mutex
	 *	m_mutex -> m_tgt_intr_mutex
	 *
	 * 2.
	 * Make sure at any time, getting the ReplyDescriptor by m_post_index
	 * and setting m_post_index to the ReplyDescriptorIndex register are
	 * atomic. Since m_mutex is not used for this purpose in ISR, the new
	 * mutex m_intr_mutex must play this role. So mptsas_poll(), where this
	 * kind of getting/setting is also performed, must use m_intr_mutex.
	 * Note, since context reply in ISR/process_intr is the only code path
	 * which affect performance, a fast path is introduced to only handle
	 * the read/write IO having context reply. For other IOs such as
	 * passthrough and IOC with context reply and all address reply, we
	 * use the as-is process_intr() to handle them. In order to keep the
	 * same semantics in process_intr(), make sure any new mutex is not held
	 * before entering it.
	 */

	mutex_enter(&mpt->m_intr_mutex);

	/*
	 * If interrupts are shared by two channels then check whether this
	 * interrupt is genuinely for this channel by making sure first the
	 * chip is in high power state.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_intr_mutex);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * If polling, interrupt was triggered by some shared interrupt because
	 * IOC interrupts are disabled during polling, so polling routine will
	 * handle any replies.  Considering this, if polling is happening,
	 * return with interrupt unclaimed.
	 */
	if (mpt->m_polled_intr) {
		mutex_exit(&mpt->m_intr_mutex);
		mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Read the istat register.
	 */
	if ((INTPENDING(mpt)) != 0) {
		/*
		 * read fifo until empty.
		 */
#ifndef __lock_lint
		_NOTE(CONSTCOND)
#endif
		while (TRUE) {
			/* Sync the post queue so the CPU sees fresh data. */
			(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
			    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

			/* All-ones means an unused descriptor: queue empty. */
			if (ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
			    ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
				break;
			}

			/*
			 * The reply is valid, process it according to its
			 * type.  Also, set a flag for updating the reply index
			 * after they've all been processed.
			 */
			did_reply = TRUE;

			reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Default.ReplyFlags);
			reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
			mpt->m_reply[i].Default.ReplyFlags = reply_type;
			if (reply_type ==
			    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
				SMID = ddi_get16(mpt->m_acc_post_queue_hdl,
				    &reply_desc_union->SCSIIOSuccess.SMID);
				/*
				 * Try the fast path first; if it declines,
				 * queue the descriptor for process_intr().
				 */
				if (mptsas_handle_io_fastpath(mpt, SMID) !=
				    TRUE) {
					mpt->m_reply[i].SCSIIOSuccess.SMID =
					    SMID;
					i++;
				}
			} else if (reply_type ==
			    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
				mpt->m_reply[i].AddressReply.ReplyFrameAddress =
				    ddi_get32(mpt->m_acc_post_queue_hdl,
				    &reply_desc_union->AddressReply.
				    ReplyFrameAddress);
				mpt->m_reply[i].AddressReply.SMID =
				    ddi_get16(mpt->m_acc_post_queue_hdl,
				    &reply_desc_union->AddressReply.SMID);
				i++;
			}
			/*
			 * Clear the reply descriptor for re-use and increment
			 * index.
			 */
			ddi_put64(mpt->m_acc_post_queue_hdl,
			    &((uint64_t *)(void *)mpt->m_post_queue)
			    [mpt->m_post_index], 0xFFFFFFFFFFFFFFFF);
			(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);

			/*
			 * Increment post index and roll over if needed.
			 */
			if (++mpt->m_post_index == mpt->m_post_queue_depth) {
				mpt->m_post_index = 0;
			}
			/* Bound the coalescing buffer m_reply[]. */
			if (i >= MPI_ADDRESS_COALSCE_MAX)
				break;
		}

		/*
		 * Update the global reply index if at least one reply was
		 * processed.
		 */
		if (did_reply) {
			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);

			/*
			 * For fma, only check the PIO is required and enough
			 * here.  Those cases where fastpath is not hit, the
			 * mptsas_fma_check() check all of the types of
			 * fma.  That is not necessary and sometimes not
			 * correct. fma check should only be done after
			 * the PIO and/or dma is performed.
			 */
			if ((mptsas_check_acc_handle(mpt->m_datap) !=
			    DDI_SUCCESS)) {
				ddi_fm_service_impact(mpt->m_dip,
				    DDI_SERVICE_UNAFFECTED);
			}

		}
	} else {
		mutex_exit(&mpt->m_intr_mutex);
		return (DDI_INTR_UNCLAIMED);
	}
	NDBG1(("mptsas_intr complete"));
	mutex_exit(&mpt->m_intr_mutex);

	/*
	 * Since most of the cmds(read and write IO with success return.)
	 * have already been processed in fast path in which the m_mutex
	 * is not held, handling here the address reply and other context reply
	 * such as passthrough and IOC cmd with m_mutex held should be a big
	 * issue for performance.
	 * If holding m_mutex to process these cmds was still an obvious issue,
	 * we can process them in a taskq.
	 */
	for (j = 0; j < i; j++) {
		mutex_enter(&mpt->m_mutex);
		mptsas_process_intr(mpt, &mpt->m_reply[j]);
		mutex_exit(&mpt->m_mutex);
	}

	/*
	 * If no helper threads are created, process the doneq in ISR. If
	 * helpers are created, use the doneq length as a metric to measure the
	 * load on the interrupt CPU. If it is long enough, which indicates the
	 * load is heavy, then we deliver the IO completions to the helpers.
	 * This measurement has some limitations, although it is simple and
	 * straightforward and works well for most of the cases at present.
	 */
	if (!mpt->m_doneq_thread_n) {
		mptsas_doneq_empty(mpt);
	} else {
		int helper = 1;
		mutex_enter(&mpt->m_intr_mutex);
		if (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)
			helper = 0;
		mutex_exit(&mpt->m_intr_mutex);
		if (helper) {
			mptsas_deliver_doneq_thread(mpt);
		} else {
			mptsas_doneq_empty(mpt);
		}
	}

	/*
	 * If there are queued cmd, start them now.
	 */
	mutex_enter(&mpt->m_intr_mutex);
	if (mpt->m_waitq != NULL) {
		mutex_exit(&mpt->m_intr_mutex);
		mutex_enter(&mpt->m_mutex);
		mptsas_restart_hba(mpt);
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_CLAIMED);
	}
	mutex_exit(&mpt->m_intr_mutex);
	return (DDI_INTR_CLAIMED);
}
5777 5776
5778 5777 /*
5779 5778 * In ISR, the successfully completed read and write IO are processed in a
5780 5779 * fast path. This function is only used to handle non-fastpath IO, including
5781 5780 * all of the address reply, and the context reply for IOC cmd, passthrough,
5782 5781 * etc.
5783 5782 * This function is also used to process polled cmd.
5784 5783 */
5785 5784 static void
5786 5785 mptsas_process_intr(mptsas_t *mpt,
5787 5786 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5788 5787 {
5789 5788 uint8_t reply_type;
5790 5789
5791 5790 /*
5792 5791 * The reply is valid, process it according to its
5793 5792 * type. Also, set a flag for updated the reply index
5794 5793 * after they've all been processed.
5795 5794 */
5796 5795 reply_type = reply_desc_union->Default.ReplyFlags;
5797 5796 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5798 5797 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5799 5798 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5800 5799 mptsas_handle_address_reply(mpt, reply_desc_union);
5801 5800 } else {
5802 5801 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5803 5802 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5804 5803 }
5805 5804 }
5806 5805
/*
 * handle qfull condition
 *
 * Called with m_mutex held when a target returns QUEUE FULL status.
 * Either hands the QFULL back to the target driver (retries exhausted or
 * disabled) or throttles down and requeues the command for retry.
 */
static void
mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
	    (ptgt->m_qfull_retries == 0)) {
		/*
		 * We have exhausted the retries on QFULL, or,
		 * the target driver has indicated that it
		 * wants to handle QFULL itself by setting
		 * qfull-retries capability to 0. In either case
		 * we want the target driver's QFULL handling
		 * to kick in. We do this by having pkt_reason
		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
		 */
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
		mutex_exit(&ptgt->m_tgt_intr_mutex);
	} else {
		/* Throttle down to just below the outstanding count. */
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		if (ptgt->m_reset_delay == 0) {
			ptgt->m_t_throttle =
			    max((ptgt->m_t_ncmds - 2), 0);
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);

		/* Requeue at the head; clear transport state, mark retry. */
		cmd->cmd_pkt_flags |= FLAG_HEAD;
		cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
		cmd->cmd_flags |= CFLAG_RETRY;

		/*
		 * mptsas_accept_pkt() must be entered without m_mutex;
		 * drop it around the call and reacquire afterwards.
		 */
		mutex_exit(&mpt->m_mutex);
		(void) mptsas_accept_pkt(mpt, cmd);
		mutex_enter(&mpt->m_mutex);

		/*
		 * when target gives queue full status with no commands
		 * outstanding (m_t_ncmds == 0), throttle is set to 0
		 * (HOLD_THROTTLE), and the queue full handling start
		 * (see psarc/1994/313); if there are commands outstanding,
		 * throttle is set to (m_t_ncmds - 2)
		 */
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		if (ptgt->m_t_throttle == HOLD_THROTTLE) {
			/*
			 * By setting throttle to QFULL_THROTTLE, we
			 * avoid submitting new commands and in
			 * mptsas_restart_cmd find out slots which need
			 * their throttles to be cleared.
			 */
			mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
			if (mpt->m_restart_cmd_timeid == 0) {
				mpt->m_restart_cmd_timeid =
				    timeout(mptsas_restart_cmd, mpt,
				    ptgt->m_qfull_retry_interval);
			}
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);
	}
}
5870 5869
5871 5870 mptsas_phymask_t
5872 5871 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5873 5872 {
5874 5873 mptsas_phymask_t phy_mask = 0;
5875 5874 uint8_t i = 0;
5876 5875
5877 5876 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5878 5877
5879 5878 ASSERT(mutex_owned(&mpt->m_mutex));
5880 5879
5881 5880 /*
5882 5881 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5883 5882 */
5884 5883 if (physport == 0xFF) {
5885 5884 return (0);
5886 5885 }
5887 5886
5888 5887 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5889 5888 if (mpt->m_phy_info[i].attached_devhdl &&
5890 5889 (mpt->m_phy_info[i].phy_mask != 0) &&
5891 5890 (mpt->m_phy_info[i].port_num == physport)) {
5892 5891 phy_mask = mpt->m_phy_info[i].phy_mask;
5893 5892 break;
5894 5893 }
5895 5894 }
5896 5895 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5897 5896 mpt->m_instance, physport, phy_mask));
5898 5897 return (phy_mask);
5899 5898 }
5900 5899
5901 5900 /*
5902 5901 * mpt free device handle after device gone, by use of passthrough
5903 5902 */
5904 5903 static int
5905 5904 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5906 5905 {
5907 5906 Mpi2SasIoUnitControlRequest_t req;
5908 5907 Mpi2SasIoUnitControlReply_t rep;
5909 5908 int ret;
5910 5909
5911 5910 ASSERT(mutex_owned(&mpt->m_mutex));
5912 5911
5913 5912 /*
5914 5913 * Need to compose a SAS IO Unit Control request message
5915 5914 * and call mptsas_do_passthru() function
5916 5915 */
5917 5916 bzero(&req, sizeof (req));
5918 5917 bzero(&rep, sizeof (rep));
5919 5918
5920 5919 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5921 5920 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5922 5921 req.DevHandle = LE_16(devhdl);
5923 5922
5924 5923 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5925 5924 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5926 5925 if (ret != 0) {
5927 5926 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5928 5927 "Control error %d", ret);
5929 5928 return (DDI_FAILURE);
5930 5929 }
5931 5930
5932 5931 /* do passthrough success, check the ioc status */
5933 5932 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5934 5933 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5935 5934 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5936 5935 return (DDI_FAILURE);
5937 5936 }
5938 5937
5939 5938 return (DDI_SUCCESS);
5940 5939 }
5941 5940
5942 5941 static void
5943 5942 mptsas_update_phymask(mptsas_t *mpt)
5944 5943 {
5945 5944 mptsas_phymask_t mask = 0, phy_mask;
5946 5945 char *phy_mask_name;
5947 5946 uint8_t current_port;
5948 5947 int i, j;
5949 5948
5950 5949 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5951 5950
5952 5951 ASSERT(mutex_owned(&mpt->m_mutex));
5953 5952
5954 5953 (void) mptsas_get_sas_io_unit_page(mpt);
5955 5954
5956 5955 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5957 5956
5958 5957 for (i = 0; i < mpt->m_num_phys; i++) {
5959 5958 phy_mask = 0x00;
5960 5959
5961 5960 if (mpt->m_phy_info[i].attached_devhdl == 0)
5962 5961 continue;
5963 5962
5964 5963 bzero(phy_mask_name, sizeof (phy_mask_name));
5965 5964
5966 5965 current_port = mpt->m_phy_info[i].port_num;
5967 5966
5968 5967 if ((mask & (1 << i)) != 0)
5969 5968 continue;
5970 5969
5971 5970 for (j = 0; j < mpt->m_num_phys; j++) {
5972 5971 if (mpt->m_phy_info[j].attached_devhdl &&
5973 5972 (mpt->m_phy_info[j].port_num == current_port)) {
5974 5973 phy_mask |= (1 << j);
5975 5974 }
5976 5975 }
5977 5976 mask = mask | phy_mask;
5978 5977
5979 5978 for (j = 0; j < mpt->m_num_phys; j++) {
5980 5979 if ((phy_mask >> j) & 0x01) {
5981 5980 mpt->m_phy_info[j].phy_mask = phy_mask;
5982 5981 }
5983 5982 }
5984 5983
5985 5984 (void) sprintf(phy_mask_name, "%x", phy_mask);
5986 5985
5987 5986 mutex_exit(&mpt->m_mutex);
5988 5987 /*
5989 5988 * register a iport, if the port has already been existed
5990 5989 * SCSA will do nothing and just return.
5991 5990 */
5992 5991 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5993 5992 mutex_enter(&mpt->m_mutex);
5994 5993 }
5995 5994 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5996 5995 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
5997 5996 }
5998 5997
/*
 * mptsas_handle_dr is a task handler for DR, the DR action includes:
 * 1. Directly attached Device Added/Removed.
 * 2. Expander Device Added/Removed.
 * 3. Indirectly Attached Device Added/Expander.
 * 4. LUNs of an existing device status change.
 * 5. RAID volume created/deleted.
 * 6. Member of RAID volume is released because of RAID deletion.
 * 7. Physical disks are removed because of RAID creation.
 *
 * Walks the list of topology-change nodes handed to the taskq, resolving
 * the parent iport dip for each node and then delegating to
 * mptsas_handle_topo_change().  Each node is freed after processing.
 */
static void
mptsas_handle_dr(void *args) {
	mptsas_topo_change_list_t	*topo_node = NULL;
	mptsas_topo_change_list_t	*save_node = NULL;
	mptsas_t			*mpt;
	dev_info_t			*parent = NULL;
	mptsas_phymask_t		phymask = 0;
	char				*phy_mask_name;
	uint8_t				flags = 0, physport = 0xff;
	uint8_t				port_update = 0;
	uint_t				event;

	topo_node = (mptsas_topo_change_list_t *)args;

	mpt = topo_node->mpt;
	event = topo_node->event;
	flags = topo_node->flags;

	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);

	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));

	/*
	 * Decide whether this event class may have changed the initiator
	 * port layout (and therefore requires a phymask refresh below).
	 */
	switch (event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Direct attached or expander attached device added
			 * into system or a Phys Disk that is being unhidden.
			 */
			port_update = 1;
		}
		break;
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
		/*
		 * New expander added into system, it must be the head
		 * of topo_change_list_t
		 */
		port_update = 1;
		break;
	default:
		port_update = 0;
		break;
	}
	/*
	 * All cases port_update == 1 may cause initiator port form change
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_port_chng && port_update) {
		/*
		 * mpt->m_port_chng flag indicates some PHYs of initiator
		 * port have changed to online. So when expander added or
		 * directly attached device online event come, we force to
		 * update port information by issuing SAS IO Unit Page and
		 * update PHYMASKs.
		 */
		(void) mptsas_update_phymask(mpt);
		mpt->m_port_chng = 0;

	}
	mutex_exit(&mpt->m_mutex);
	/* Process and free each topology-change node in list order. */
	while (topo_node) {
		phymask = 0;
		if (parent == NULL) {
			physport = topo_node->un.physport;
			event = topo_node->event;
			flags = topo_node->flags;
			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
				/*
				 * For all offline events, phymask is known
				 */
				phymask = topo_node->un.phymask;
				goto find_parent;
			}
			/*
			 * NOTE(review): 'event' is tested against a
			 * MPTSAS_TOPO_FLAG_* constant here rather than a
			 * MPTSAS_DR_EVENT_* one — looks intentional in the
			 * original code, but verify the two namespaces do
			 * not collide.
			 */
			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
				goto handle_topo_change;
			}
			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
				phymask = topo_node->un.phymask;
				goto find_parent;
			}

			if ((flags ==
			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
				/*
				 * There is no any field in IR_CONFIG_CHANGE
				 * event indicate physport/phynum, let's get
				 * parent after SAS Device Page0 request.
				 */
				goto handle_topo_change;
			}

			mutex_enter(&mpt->m_mutex);
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				/*
				 * If the direct attached device added or a
				 * phys disk is being unhidden, argument
				 * physport actually is PHY#, so we have to get
				 * phymask according PHY#.
				 */
				physport = mpt->m_phy_info[physport].port_num;
			}

			/*
			 * Translate physport to phymask so that we can search
			 * parent dip.
			 */
			phymask = mptsas_physport_to_phymask(mpt,
			    physport);
			mutex_exit(&mpt->m_mutex);

find_parent:
			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
			/*
			 * For RAID topology change node, write the iport name
			 * as v0.
			 */
			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
				(void) sprintf(phy_mask_name, "v0");
			} else {
				/*
				 * phymask can be 0 if the drive has been
				 * pulled by the time an add event is
				 * processed.  If phymask is 0, just skip this
				 * event and continue.
				 */
				if (phymask == 0) {
					mutex_enter(&mpt->m_mutex);
					save_node = topo_node;
					topo_node = topo_node->next;
					ASSERT(save_node);
					kmem_free(save_node,
					    sizeof (mptsas_topo_change_list_t));
					mutex_exit(&mpt->m_mutex);

					parent = NULL;
					continue;
				}
				(void) sprintf(phy_mask_name, "%x", phymask);
			}
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find an "
				    "iport, should not happen!");
				goto out;
			}

		}
		ASSERT(parent);
handle_topo_change:

		mutex_enter(&mpt->m_mutex);

		mptsas_handle_topo_change(topo_node, parent);
		save_node = topo_node;
		topo_node = topo_node->next;
		ASSERT(save_node);
		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
		mutex_exit(&mpt->m_mutex);

		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
			/*
			 * If direct attached device associated, make sure
			 * reset the parent before start the next one. But
			 * all devices associated with expander shares the
			 * parent.  Also, reset parent if this is for RAID.
			 */
			parent = NULL;
		}
	}
out:
	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
}
6188 6187
/*
 * Apply one queued topology-change event to the device tree.
 *
 * topo_node - the change to apply: event type, device handle, flags, and
 *             (for target reconfig) a cached mptsas_target_t pointer.
 * parent    - the dev_info node (normally the iport) under which the
 *             affected target/SMP node lives.
 *
 * Entered with mpt->m_mutex held (asserted below).  The mutex is dropped
 * around NDI bus-config and property operations and normally re-acquired
 * before returning, so the caller's lock state is preserved.
 *
 * NOTE(review): several early "return"/"break" paths below exit after
 * mutex_exit(&mpt->m_mutex) without re-acquiring it, while the caller
 * (mptsas_handle_dr) calls mutex_exit() again after this function
 * returns -- this looks like a lock-balance bug; confirm against the
 * caller before relying on those error paths.
 */
static void
mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
    dev_info_t *parent)
{
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp = NULL;
	mptsas_t	*mpt = (void *)topo_node->mpt;
	uint16_t	devhdl;
	uint16_t	attached_devhdl;
	uint64_t	sas_wwn = 0;
	int		rval = 0;
	uint32_t	page_address;
	uint8_t		phy, flags;
	char		*addr = NULL;
	dev_info_t	*lundip;
	int		circ = 0, circ1 = 0;
	char		attached_wwnstr[MPTSAS_WWN_STRLEN];

	NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));

	ASSERT(mutex_owned(&mpt->m_mutex));

	switch (topo_node->event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
	{
		char *phy_mask_name;
		mptsas_phymask_t phymask = 0;

		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/*
			 * Get latest RAID info.
			 */
			(void) mptsas_get_raid_info(mpt);
			ptgt = mptsas_search_by_devhdl(
			    &mpt->m_active->m_tgttbl, topo_node->devhdl);
			if (ptgt == NULL)
				break;
		} else {
			/* Non-RAID events carry the target cached in object */
			ptgt = (void *)topo_node->object;
		}

		if (ptgt == NULL) {
			/*
			 * If a Phys Disk was deleted, RAID info needs to be
			 * updated to reflect the new topology.
			 */
			(void) mptsas_get_raid_info(mpt);

			/*
			 * Get sas device page 0 by DevHandle to make sure if
			 * SSP/SATA end device exist.
			 */
			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
			    topo_node->devhdl;

			rval = mptsas_get_target_device_info(mpt, page_address,
			    &devhdl, &ptgt);
			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: target %d is "
				    "not a SAS/SATA device. \n",
				    topo_node->devhdl);
			} else if (rval == DEV_INFO_FAIL_ALLOC) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "allocate memory. \n");
			}
			/*
			 * If rval is DEV_INFO_PHYS_DISK then there is nothing
			 * else to do, just leave.
			 */
			if (rval != DEV_INFO_SUCCESS) {
				return;
			}
		}

		ASSERT(ptgt->m_devhdl == topo_node->devhdl);

		/* Drop m_mutex for the NDI bus-config calls below. */
		mutex_exit(&mpt->m_mutex);
		flags = topo_node->flags;

		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
			/*
			 * Physical disks hang under the iport matching their
			 * phymask; look that iport up by name ("%x" phymask).
			 */
			phymask = ptgt->m_phymask;
			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
			(void) sprintf(phy_mask_name, "%x", phymask);
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find a "
				    "iport for PD, should not happen!");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/* RAID volume: configure directly under the iport. */
			ndi_devi_enter(parent, &circ1);
			(void) mptsas_config_raid(parent, topo_node->devhdl,
			    &lundip);
			ndi_devi_exit(parent, circ1);
		} else {
			/*
			 * hold nexus for bus configure
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ);
			ndi_devi_enter(parent, &circ1);
			rval = mptsas_config_target(parent, ptgt);
			/*
			 * release nexus for bus configure
			 */
			ndi_devi_exit(parent, circ1);
			ndi_devi_exit(scsi_vhci_dip, circ);

			/*
			 * Add parent's props for SMHBA support
			 */
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				bzero(attached_wwnstr,
				    sizeof (attached_wwnstr));
				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
				    ptgt->m_sas_wwn);
				if (ddi_prop_update_string(DDI_DEV_T_NONE,
				    parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT,
				    attached_wwnstr)
				    != DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent,
					    SCSI_ADDR_PROP_ATTACHED_PORT);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    "attached-port props");
					/*
					 * NOTE(review): returns with m_mutex
					 * dropped -- see header comment.
					 */
					return;
				}
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS, 1) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_NUM_PHYS);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    " create num-phys props");
					return;
				}

				/*
				 * Update PHY info for smhba
				 */
				mutex_enter(&mpt->m_mutex);
				if (mptsas_smhba_phy_init(mpt)) {
					mutex_exit(&mpt->m_mutex);
					mptsas_log(mpt, CE_WARN, "mptsas phy"
					    " update failed");
					return;
				}
				mutex_exit(&mpt->m_mutex);
				mptsas_smhba_set_phy_props(mpt,
				    ddi_get_name_addr(parent), parent,
				    1, &attached_devhdl);
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT, 0) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_VIRTUAL_PORT);
					mptsas_log(mpt, CE_WARN,
					    "mptsas virtual-port"
					    "port prop update failed");
					return;
				}
			}
		}
		/* Re-acquire m_mutex before returning to the caller. */
		mutex_enter(&mpt->m_mutex);

		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
		    ptgt->m_phymask));
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
	{
		mptsas_hash_table_t *tgttbl = &mpt->m_active->m_tgttbl;
		devhdl = topo_node->devhdl;
		ptgt = mptsas_search_by_devhdl(tgttbl, devhdl);
		if (ptgt == NULL)
			break;

		sas_wwn = ptgt->m_sas_wwn;
		phy = ptgt->m_phynum;

		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

		/* Unit-address is "w<wwn>" for SAS, "p<phy>" for SATA/no-wwn */
		if (sas_wwn) {
			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
		} else {
			(void) sprintf(addr, "p%x", phy);
		}
		ASSERT(ptgt->m_devhdl == devhdl);

		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
		    (topo_node->flags ==
		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Get latest RAID info if RAID volume status changes
			 * or Phys Disk status changes
			 */
			(void) mptsas_get_raid_info(mpt);
		}
		/*
		 * Abort all outstanding command on the device
		 */
		rval = mptsas_do_scsi_reset(mpt, devhdl);
		if (rval) {
			NDBG20(("mptsas%d handle_topo_change to reset target "
			    "before offline devhdl:%x, phymask:%x, rval:%x",
			    mpt->m_instance, ptgt->m_devhdl, ptgt->m_phymask,
			    rval));
		}

		mutex_exit(&mpt->m_mutex);

		/* Hold both the vHCI and the iport while offlining the node */
		ndi_devi_enter(scsi_vhci_dip, &circ);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_target(parent, addr);
		ndi_devi_exit(parent, circ1);
		ndi_devi_exit(scsi_vhci_dip, circ);
		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
		    "phymask:%x, rval:%x", mpt->m_instance,
		    ptgt->m_devhdl, ptgt->m_phymask, rval));

		kmem_free(addr, SCSI_MAXNAMELEN);

		/*
		 * Clear parent's props for SMHBA support
		 */
		flags = topo_node->flags;
		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				/*
				 * NOTE(review): breaks out with m_mutex
				 * dropped -- see header comment.
				 */
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				break;
			}
		}

		mutex_enter(&mpt->m_mutex);
		/*
		 * Record the desired LED state and push it to the enclosure;
		 * a failure to flush is logged (debug) but not fatal.
		 */
		ptgt->m_led_status = 0;
		if (mptsas_flush_led_status(mpt, ptgt) != DDI_SUCCESS) {
			NDBG14(("mptsas: clear LED for tgt %x failed",
			    ptgt->m_slot_num));
		}
		if (rval == DDI_SUCCESS) {
			/* Offline succeeded: drop the target from the table */
			mptsas_tgt_free(&mpt->m_active->m_tgttbl,
			    ptgt->m_sas_wwn, ptgt->m_phymask);
			ptgt = NULL;
		} else {
			/*
			 * clean DR_INTRANSITION flag to allow I/O down to
			 * PHCI driver since failover finished.
			 * Invalidate the devhdl
			 */
			mutex_enter(&ptgt->m_tgt_intr_mutex);
			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
			ptgt->m_tgt_unconfigured = 0;
			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
			mutex_exit(&ptgt->m_tgt_intr_mutex);
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
			rval = mptsas_free_devhdl(mpt, devhdl);

			NDBG20(("mptsas%d handle_topo_change to remove "
			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
			    rval));
		}

		break;
	}
	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
	{
		/*
		 * Controller still owns a DevHandle for a device that never
		 * made it into the target table; reset it, then free the
		 * handle.  Runs entirely with m_mutex held.
		 */
		devhdl = topo_node->devhdl;
		/*
		 * If this is the remove handle event, do a reset first.
		 */
		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
			rval = mptsas_do_scsi_reset(mpt, devhdl);
			if (rval) {
				NDBG20(("mpt%d reset target before remove "
				    "devhdl:%x, rval:%x", mpt->m_instance,
				    devhdl, rval));
			}
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		rval = mptsas_free_devhdl(mpt, devhdl);
		NDBG20(("mptsas%d handle_topo_change to remove "
		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
		    rval));
		break;
	}
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
	{
		mptsas_smp_t smp;
		dev_info_t *smpdip;
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;

		devhdl = topo_node->devhdl;

		/* Fetch expander page 0 to build the smp record to online */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
		if (rval != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "failed to online smp, "
			    "handle %x", devhdl);
			return;
		}

		psmp = mptsas_smp_alloc(smptbl, &smp);
		if (psmp == NULL) {
			return;
		}

		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		(void) mptsas_online_smp(parent, psmp, &smpdip);
		ndi_devi_exit(parent, circ1);

		mutex_enter(&mpt->m_mutex);
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_SMP:
	{
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
		devhdl = topo_node->devhdl;
		uint32_t dev_info;

		psmp = mptsas_search_by_devhdl(smptbl, devhdl);
		if (psmp == NULL)
			break;
		/*
		 * The mptsas_smp_t data is released only if the dip is offlined
		 * successfully.
		 */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
		ndi_devi_exit(parent, circ1);

		dev_info = psmp->m_deviceinfo;
		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
		    DEVINFO_DIRECT_ATTACHED) {
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				/*
				 * NOTE(review): returns with m_mutex
				 * dropped -- see header comment.
				 */
				return;
			}
			/*
			 * Check whether the smp connected to the iport,
			 */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys"
				    "prop update failed");
				return;
			}
			/*
			 * Clear parent's attached-port props
			 */
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				return;
			}
		}

		mutex_enter(&mpt->m_mutex);
		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
		if (rval == DDI_SUCCESS) {
			mptsas_smp_free(smptbl, psmp->m_sasaddr,
			    psmp->m_phymask);
		} else {
			/* Offline failed: keep the record, invalidate handle */
			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		}

		bzero(attached_wwnstr, sizeof (attached_wwnstr));

		break;
	}
	default:
		return;
	}
}
6621 6621
6622 6622 /*
6623 6623 * Record the event if its type is enabled in mpt instance by ioctl.
6624 6624 */
6625 6625 static void
6626 6626 mptsas_record_event(void *args)
6627 6627 {
6628 6628 m_replyh_arg_t *replyh_arg;
6629 6629 pMpi2EventNotificationReply_t eventreply;
6630 6630 uint32_t event, rfm;
6631 6631 mptsas_t *mpt;
6632 6632 int i, j;
6633 6633 uint16_t event_data_len;
6634 6634 boolean_t sendAEN = FALSE;
6635 6635
6636 6636 replyh_arg = (m_replyh_arg_t *)args;
6637 6637 rfm = replyh_arg->rfm;
6638 6638 mpt = replyh_arg->mpt;
6639 6639
6640 6640 eventreply = (pMpi2EventNotificationReply_t)
6641 6641 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6642 6642 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6643 6643
6644 6644
6645 6645 /*
6646 6646 * Generate a system event to let anyone who cares know that a
6647 6647 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6648 6648 * event mask is set to.
6649 6649 */
6650 6650 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6651 6651 sendAEN = TRUE;
6652 6652 }
6653 6653
6654 6654 /*
6655 6655 * Record the event only if it is not masked. Determine which dword
6656 6656 * and bit of event mask to test.
6657 6657 */
6658 6658 i = (uint8_t)(event / 32);
6659 6659 j = (uint8_t)(event % 32);
6660 6660 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6661 6661 i = mpt->m_event_index;
6662 6662 mpt->m_events[i].Type = event;
6663 6663 mpt->m_events[i].Number = ++mpt->m_event_number;
6664 6664 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6665 6665 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6666 6666 &eventreply->EventDataLength);
6667 6667
6668 6668 if (event_data_len > 0) {
6669 6669 /*
6670 6670 * Limit data to size in m_event entry
6671 6671 */
6672 6672 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6673 6673 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6674 6674 }
6675 6675 for (j = 0; j < event_data_len; j++) {
6676 6676 mpt->m_events[i].Data[j] =
6677 6677 ddi_get32(mpt->m_acc_reply_frame_hdl,
6678 6678 &(eventreply->EventData[j]));
6679 6679 }
6680 6680
6681 6681 /*
6682 6682 * check for index wrap-around
6683 6683 */
6684 6684 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6685 6685 i = 0;
6686 6686 }
6687 6687 mpt->m_event_index = (uint8_t)i;
6688 6688
6689 6689 /*
6690 6690 * Set flag to send the event.
6691 6691 */
6692 6692 sendAEN = TRUE;
6693 6693 }
6694 6694 }
6695 6695
6696 6696 /*
6697 6697 * Generate a system event if flag is set to let anyone who cares know
6698 6698 * that an event has occurred.
6699 6699 */
6700 6700 if (sendAEN) {
6701 6701 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6702 6702 "SAS", NULL, NULL, DDI_NOSLEEP);
6703 6703 }
6704 6704 }
6705 6705
6706 6706 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6707 6707 /*
6708 6708 * handle sync events from ioc in interrupt
6709 6709 * return value:
6710 6710 * DDI_SUCCESS: The event is handled by this func
6711 6711 * DDI_FAILURE: Event is not handled
6712 6712 */
6713 6713 static int
6714 6714 mptsas_handle_event_sync(void *args)
6715 6715 {
6716 6716 m_replyh_arg_t *replyh_arg;
6717 6717 pMpi2EventNotificationReply_t eventreply;
6718 6718 uint32_t event, rfm;
6719 6719 mptsas_t *mpt;
6720 6720 uint_t iocstatus;
6721 6721
6722 6722 replyh_arg = (m_replyh_arg_t *)args;
6723 6723 rfm = replyh_arg->rfm;
6724 6724 mpt = replyh_arg->mpt;
6725 6725
6726 6726 ASSERT(mutex_owned(&mpt->m_mutex));
6727 6727
6728 6728 eventreply = (pMpi2EventNotificationReply_t)
6729 6729 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6730 6730 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6731 6731
6732 6732 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6733 6733 &eventreply->IOCStatus)) {
6734 6734 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6735 6735 mptsas_log(mpt, CE_WARN,
6736 6736 "!mptsas_handle_event_sync: IOCStatus=0x%x, "
6737 6737 "IOCLogInfo=0x%x", iocstatus,
6738 6738 ddi_get32(mpt->m_acc_reply_frame_hdl,
6739 6739 &eventreply->IOCLogInfo));
6740 6740 } else {
6741 6741 mptsas_log(mpt, CE_WARN,
6742 6742 "mptsas_handle_event_sync: IOCStatus=0x%x, "
6743 6743 "IOCLogInfo=0x%x", iocstatus,
6744 6744 ddi_get32(mpt->m_acc_reply_frame_hdl,
6745 6745 &eventreply->IOCLogInfo));
6746 6746 }
6747 6747 }
6748 6748
6749 6749 /*
6750 6750 * figure out what kind of event we got and handle accordingly
6751 6751 */
6752 6752 switch (event) {
6753 6753 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6754 6754 {
6755 6755 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
6756 6756 uint8_t num_entries, expstatus, phy;
6757 6757 uint8_t phystatus, physport, state, i;
6758 6758 uint8_t start_phy_num, link_rate;
6759 6759 uint16_t dev_handle, reason_code;
6760 6760 uint16_t enc_handle, expd_handle;
6761 6761 char string[80], curr[80], prev[80];
6762 6762 mptsas_topo_change_list_t *topo_head = NULL;
6763 6763 mptsas_topo_change_list_t *topo_tail = NULL;
6764 6764 mptsas_topo_change_list_t *topo_node = NULL;
6765 6765 mptsas_target_t *ptgt;
6766 6766 mptsas_smp_t *psmp;
6767 6767 mptsas_hash_table_t *tgttbl, *smptbl;
6768 6768 uint8_t flags = 0, exp_flag;
6769 6769 smhba_info_t *pSmhba = NULL;
6770 6770
6771 6771 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6772 6772
6773 6773 tgttbl = &mpt->m_active->m_tgttbl;
6774 6774 smptbl = &mpt->m_active->m_smptbl;
6775 6775
6776 6776 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6777 6777 eventreply->EventData;
6778 6778
6779 6779 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6780 6780 &sas_topo_change_list->EnclosureHandle);
6781 6781 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6782 6782 &sas_topo_change_list->ExpanderDevHandle);
6783 6783 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6784 6784 &sas_topo_change_list->NumEntries);
6785 6785 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6786 6786 &sas_topo_change_list->StartPhyNum);
6787 6787 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6788 6788 &sas_topo_change_list->ExpStatus);
6789 6789 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6790 6790 &sas_topo_change_list->PhysicalPort);
6791 6791
6792 6792 string[0] = 0;
6793 6793 if (expd_handle) {
6794 6794 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6795 6795 switch (expstatus) {
6796 6796 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6797 6797 (void) sprintf(string, " added");
6798 6798 /*
6799 6799 * New expander device added
6800 6800 */
6801 6801 mpt->m_port_chng = 1;
6802 6802 topo_node = kmem_zalloc(
6803 6803 sizeof (mptsas_topo_change_list_t),
6804 6804 KM_SLEEP);
6805 6805 topo_node->mpt = mpt;
6806 6806 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6807 6807 topo_node->un.physport = physport;
6808 6808 topo_node->devhdl = expd_handle;
6809 6809 topo_node->flags = flags;
6810 6810 topo_node->object = NULL;
6811 6811 if (topo_head == NULL) {
6812 6812 topo_head = topo_tail = topo_node;
6813 6813 } else {
6814 6814 topo_tail->next = topo_node;
6815 6815 topo_tail = topo_node;
6816 6816 }
6817 6817 break;
6818 6818 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6819 6819 (void) sprintf(string, " not responding, "
6820 6820 "removed");
6821 6821 psmp = mptsas_search_by_devhdl(smptbl,
6822 6822 expd_handle);
6823 6823 if (psmp == NULL)
6824 6824 break;
6825 6825
6826 6826 topo_node = kmem_zalloc(
6827 6827 sizeof (mptsas_topo_change_list_t),
6828 6828 KM_SLEEP);
6829 6829 topo_node->mpt = mpt;
6830 6830 topo_node->un.phymask = psmp->m_phymask;
6831 6831 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6832 6832 topo_node->devhdl = expd_handle;
6833 6833 topo_node->flags = flags;
6834 6834 topo_node->object = NULL;
6835 6835 if (topo_head == NULL) {
6836 6836 topo_head = topo_tail = topo_node;
6837 6837 } else {
6838 6838 topo_tail->next = topo_node;
6839 6839 topo_tail = topo_node;
6840 6840 }
6841 6841 break;
6842 6842 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6843 6843 break;
6844 6844 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6845 6845 (void) sprintf(string, " not responding, "
6846 6846 "delaying removal");
6847 6847 break;
6848 6848 default:
6849 6849 break;
6850 6850 }
6851 6851 } else {
6852 6852 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6853 6853 }
6854 6854
6855 6855 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6856 6856 enc_handle, expd_handle, string));
6857 6857 for (i = 0; i < num_entries; i++) {
6858 6858 phy = i + start_phy_num;
6859 6859 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6860 6860 &sas_topo_change_list->PHY[i].PhyStatus);
6861 6861 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6862 6862 &sas_topo_change_list->PHY[i].AttachedDevHandle);
6863 6863 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
6864 6864 /*
6865 6865 * Filter out processing of Phy Vacant Status unless
6866 6866 * the reason code is "Not Responding". Process all
6867 6867 * other combinations of Phy Status and Reason Codes.
6868 6868 */
6869 6869 if ((phystatus &
6870 6870 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
6871 6871 (reason_code !=
6872 6872 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
6873 6873 continue;
6874 6874 }
6875 6875 curr[0] = 0;
6876 6876 prev[0] = 0;
6877 6877 string[0] = 0;
6878 6878 switch (reason_code) {
6879 6879 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6880 6880 {
6881 6881 NDBG20(("mptsas%d phy %d physical_port %d "
6882 6882 "dev_handle %d added", mpt->m_instance, phy,
6883 6883 physport, dev_handle));
6884 6884 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6885 6885 &sas_topo_change_list->PHY[i].LinkRate);
6886 6886 state = (link_rate &
6887 6887 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6888 6888 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6889 6889 switch (state) {
6890 6890 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6891 6891 (void) sprintf(curr, "is disabled");
6892 6892 break;
6893 6893 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6894 6894 (void) sprintf(curr, "is offline, "
6895 6895 "failed speed negotiation");
6896 6896 break;
6897 6897 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6898 6898 (void) sprintf(curr, "SATA OOB "
6899 6899 "complete");
6900 6900 break;
6901 6901 case SMP_RESET_IN_PROGRESS:
6902 6902 (void) sprintf(curr, "SMP reset in "
6903 6903 "progress");
6904 6904 break;
6905 6905 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6906 6906 (void) sprintf(curr, "is online at "
6907 6907 "1.5 Gbps");
6908 6908 break;
6909 6909 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6910 6910 (void) sprintf(curr, "is online at 3.0 "
6911 6911 "Gbps");
6912 6912 break;
6913 6913 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6914 6914 (void) sprintf(curr, "is online at 6.0 "
6915 6915 "Gbps");
6916 6916 break;
6917 6917 default:
6918 6918 (void) sprintf(curr, "state is "
6919 6919 "unknown");
6920 6920 break;
6921 6921 }
6922 6922 /*
6923 6923 * New target device added into the system.
6924 6924 * Set association flag according to if an
6925 6925 * expander is used or not.
6926 6926 */
6927 6927 exp_flag =
6928 6928 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6929 6929 if (flags ==
6930 6930 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6931 6931 flags = exp_flag;
6932 6932 }
6933 6933 topo_node = kmem_zalloc(
6934 6934 sizeof (mptsas_topo_change_list_t),
6935 6935 KM_SLEEP);
6936 6936 topo_node->mpt = mpt;
6937 6937 topo_node->event =
6938 6938 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6939 6939 if (expd_handle == 0) {
6940 6940 /*
6941 6941 * Per MPI 2, if expander dev handle
6942 6942 * is 0, it's a directly attached
6943 6943 * device. So driver use PHY to decide
6944 6944 * which iport is associated
6945 6945 */
6946 6946 physport = phy;
6947 6947 mpt->m_port_chng = 1;
6948 6948 }
6949 6949 topo_node->un.physport = physport;
6950 6950 topo_node->devhdl = dev_handle;
6951 6951 topo_node->flags = flags;
6952 6952 topo_node->object = NULL;
6953 6953 if (topo_head == NULL) {
6954 6954 topo_head = topo_tail = topo_node;
6955 6955 } else {
6956 6956 topo_tail->next = topo_node;
6957 6957 topo_tail = topo_node;
6958 6958 }
6959 6959 break;
6960 6960 }
6961 6961 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6962 6962 {
6963 6963 NDBG20(("mptsas%d phy %d physical_port %d "
6964 6964 "dev_handle %d removed", mpt->m_instance,
6965 6965 phy, physport, dev_handle));
6966 6966 /*
6967 6967 * Set association flag according to if an
6968 6968 * expander is used or not.
6969 6969 */
6970 6970 exp_flag =
6971 6971 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6972 6972 if (flags ==
6973 6973 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6974 6974 flags = exp_flag;
6975 6975 }
6976 6976 /*
6977 6977 * Target device is removed from the system
6978 6978 * Before the device is really offline from
6979 6979 * from system.
6980 6980 */
6981 6981 ptgt = mptsas_search_by_devhdl(tgttbl,
6982 6982 dev_handle);
6983 6983 /*
6984 6984 * If ptgt is NULL here, it means that the
6985 6985 * DevHandle is not in the hash table. This is
6986 6986 * reasonable sometimes. For example, if a
6987 6987 * disk was pulled, then added, then pulled
6988 6988 * again, the disk will not have been put into
6989 6989 * the hash table because the add event will
6990 6990 * have an invalid phymask. BUT, this does not
6991 6991 * mean that the DevHandle is invalid. The
6992 6992 * controller will still have a valid DevHandle
6993 6993 * that must be removed. To do this, use the
6994 6994 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
6995 6995 */
6996 6996 if (ptgt == NULL) {
6997 6997 topo_node = kmem_zalloc(
6998 6998 sizeof (mptsas_topo_change_list_t),
6999 6999 KM_SLEEP);
7000 7000 topo_node->mpt = mpt;
7001 7001 topo_node->un.phymask = 0;
7002 7002 topo_node->event =
7003 7003 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7004 7004 topo_node->devhdl = dev_handle;
7005 7005 topo_node->flags = flags;
7006 7006 topo_node->object = NULL;
7007 7007 if (topo_head == NULL) {
7008 7008 topo_head = topo_tail =
7009 7009 topo_node;
7010 7010 } else {
7011 7011 topo_tail->next = topo_node;
7012 7012 topo_tail = topo_node;
7013 7013 }
7014 7014 break;
7015 7015 }
7016 7016
7017 7017 /*
7018 7018 * Update DR flag immediately avoid I/O failure
7019 7019 * before failover finish. Pay attention to the
7020 7020 * mutex protect, we need grab the per target
7021 7021 * mutex during set m_dr_flag because the
7022 7022 * m_mutex would not be held all the time in
7023 7023 * mptsas_scsi_start().
7024 7024 */
7025 7025 mutex_enter(&ptgt->m_tgt_intr_mutex);
7026 7026 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7027 7027 mutex_exit(&ptgt->m_tgt_intr_mutex);
7028 7028
7029 7029 topo_node = kmem_zalloc(
7030 7030 sizeof (mptsas_topo_change_list_t),
7031 7031 KM_SLEEP);
7032 7032 topo_node->mpt = mpt;
7033 7033 topo_node->un.phymask = ptgt->m_phymask;
7034 7034 topo_node->event =
7035 7035 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7036 7036 topo_node->devhdl = dev_handle;
7037 7037 topo_node->flags = flags;
7038 7038 topo_node->object = NULL;
7039 7039 if (topo_head == NULL) {
7040 7040 topo_head = topo_tail = topo_node;
7041 7041 } else {
7042 7042 topo_tail->next = topo_node;
7043 7043 topo_tail = topo_node;
7044 7044 }
7045 7045 break;
7046 7046 }
7047 7047 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7048 7048 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7049 7049 &sas_topo_change_list->PHY[i].LinkRate);
7050 7050 state = (link_rate &
7051 7051 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7052 7052 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7053 7053 pSmhba = &mpt->m_phy_info[i].smhba_info;
7054 7054 pSmhba->negotiated_link_rate = state;
7055 7055 switch (state) {
7056 7056 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7057 7057 (void) sprintf(curr, "is disabled");
7058 7058 mptsas_smhba_log_sysevent(mpt,
7059 7059 ESC_SAS_PHY_EVENT,
7060 7060 SAS_PHY_REMOVE,
7061 7061 &mpt->m_phy_info[i].smhba_info);
7062 7062 mpt->m_phy_info[i].smhba_info.
7063 7063 negotiated_link_rate
7064 7064 = 0x1;
7065 7065 break;
7066 7066 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7067 7067 (void) sprintf(curr, "is offline, "
7068 7068 "failed speed negotiation");
7069 7069 mptsas_smhba_log_sysevent(mpt,
7070 7070 ESC_SAS_PHY_EVENT,
7071 7071 SAS_PHY_OFFLINE,
7072 7072 &mpt->m_phy_info[i].smhba_info);
7073 7073 break;
7074 7074 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7075 7075 (void) sprintf(curr, "SATA OOB "
7076 7076 "complete");
7077 7077 break;
7078 7078 case SMP_RESET_IN_PROGRESS:
7079 7079 (void) sprintf(curr, "SMP reset in "
7080 7080 "progress");
7081 7081 break;
7082 7082 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7083 7083 (void) sprintf(curr, "is online at "
7084 7084 "1.5 Gbps");
7085 7085 if ((expd_handle == 0) &&
7086 7086 (enc_handle == 1)) {
7087 7087 mpt->m_port_chng = 1;
7088 7088 }
7089 7089 mptsas_smhba_log_sysevent(mpt,
7090 7090 ESC_SAS_PHY_EVENT,
7091 7091 SAS_PHY_ONLINE,
7092 7092 &mpt->m_phy_info[i].smhba_info);
7093 7093 break;
7094 7094 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7095 7095 (void) sprintf(curr, "is online at 3.0 "
7096 7096 "Gbps");
7097 7097 if ((expd_handle == 0) &&
7098 7098 (enc_handle == 1)) {
7099 7099 mpt->m_port_chng = 1;
7100 7100 }
7101 7101 mptsas_smhba_log_sysevent(mpt,
7102 7102 ESC_SAS_PHY_EVENT,
7103 7103 SAS_PHY_ONLINE,
7104 7104 &mpt->m_phy_info[i].smhba_info);
7105 7105 break;
7106 7106 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7107 7107 (void) sprintf(curr, "is online at "
7108 7108 "6.0 Gbps");
7109 7109 if ((expd_handle == 0) &&
7110 7110 (enc_handle == 1)) {
7111 7111 mpt->m_port_chng = 1;
7112 7112 }
7113 7113 mptsas_smhba_log_sysevent(mpt,
7114 7114 ESC_SAS_PHY_EVENT,
7115 7115 SAS_PHY_ONLINE,
7116 7116 &mpt->m_phy_info[i].smhba_info);
7117 7117 break;
7118 7118 default:
7119 7119 (void) sprintf(curr, "state is "
7120 7120 "unknown");
7121 7121 break;
7122 7122 }
7123 7123
7124 7124 state = (link_rate &
7125 7125 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
7126 7126 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
7127 7127 switch (state) {
7128 7128 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7129 7129 (void) sprintf(prev, ", was disabled");
7130 7130 break;
7131 7131 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7132 7132 (void) sprintf(prev, ", was offline, "
7133 7133 "failed speed negotiation");
7134 7134 break;
7135 7135 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7136 7136 (void) sprintf(prev, ", was SATA OOB "
7137 7137 "complete");
7138 7138 break;
7139 7139 case SMP_RESET_IN_PROGRESS:
7140 7140 (void) sprintf(prev, ", was SMP reset "
7141 7141 "in progress");
7142 7142 break;
7143 7143 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7144 7144 (void) sprintf(prev, ", was online at "
7145 7145 "1.5 Gbps");
7146 7146 break;
7147 7147 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7148 7148 (void) sprintf(prev, ", was online at "
7149 7149 "3.0 Gbps");
7150 7150 break;
7151 7151 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7152 7152 (void) sprintf(prev, ", was online at "
7153 7153 "6.0 Gbps");
7154 7154 break;
7155 7155 default:
7156 7156 break;
7157 7157 }
7158 7158 (void) sprintf(&string[strlen(string)], "link "
7159 7159 "changed, ");
7160 7160 break;
7161 7161 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7162 7162 continue;
7163 7163 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7164 7164 (void) sprintf(&string[strlen(string)],
7165 7165 "target not responding, delaying "
7166 7166 "removal");
7167 7167 break;
7168 7168 }
7169 7169 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7170 7170 mpt->m_instance, phy, dev_handle, string, curr,
7171 7171 prev));
7172 7172 }
7173 7173 if (topo_head != NULL) {
7174 7174 /*
7175 7175 * Launch DR taskq to handle topology change
7176 7176 */
7177 7177 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7178 7178 mptsas_handle_dr, (void *)topo_head,
7179 7179 DDI_NOSLEEP)) != DDI_SUCCESS) {
7180 7180 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7181 7181 "for handle SAS DR event failed. \n");
7182 7182 }
7183 7183 }
7184 7184 break;
7185 7185 }
7186 7186 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7187 7187 {
7188 7188 Mpi2EventDataIrConfigChangeList_t *irChangeList;
7189 7189 mptsas_topo_change_list_t *topo_head = NULL;
7190 7190 mptsas_topo_change_list_t *topo_tail = NULL;
7191 7191 mptsas_topo_change_list_t *topo_node = NULL;
7192 7192 mptsas_target_t *ptgt;
7193 7193 mptsas_hash_table_t *tgttbl;
7194 7194 uint8_t num_entries, i, reason;
7195 7195 uint16_t volhandle, diskhandle;
7196 7196
7197 7197 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7198 7198 eventreply->EventData;
7199 7199 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7200 7200 &irChangeList->NumElements);
7201 7201
7202 7202 tgttbl = &mpt->m_active->m_tgttbl;
7203 7203
7204 7204 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7205 7205 mpt->m_instance));
7206 7206
7207 7207 for (i = 0; i < num_entries; i++) {
7208 7208 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7209 7209 &irChangeList->ConfigElement[i].ReasonCode);
7210 7210 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7211 7211 &irChangeList->ConfigElement[i].VolDevHandle);
7212 7212 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7213 7213 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7214 7214
7215 7215 switch (reason) {
7216 7216 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7217 7217 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7218 7218 {
7219 7219 NDBG20(("mptsas %d volume added\n",
7220 7220 mpt->m_instance));
7221 7221
7222 7222 topo_node = kmem_zalloc(
7223 7223 sizeof (mptsas_topo_change_list_t),
7224 7224 KM_SLEEP);
7225 7225
7226 7226 topo_node->mpt = mpt;
7227 7227 topo_node->event =
7228 7228 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7229 7229 topo_node->un.physport = 0xff;
7230 7230 topo_node->devhdl = volhandle;
7231 7231 topo_node->flags =
7232 7232 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7233 7233 topo_node->object = NULL;
7234 7234 if (topo_head == NULL) {
7235 7235 topo_head = topo_tail = topo_node;
7236 7236 } else {
7237 7237 topo_tail->next = topo_node;
7238 7238 topo_tail = topo_node;
7239 7239 }
7240 7240 break;
7241 7241 }
7242 7242 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7243 7243 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7244 7244 {
7245 7245 NDBG20(("mptsas %d volume deleted\n",
7246 7246 mpt->m_instance));
7247 7247 ptgt = mptsas_search_by_devhdl(tgttbl,
7248 7248 volhandle);
7249 7249 if (ptgt == NULL)
7250 7250 break;
7251 7251
7252 7252 /*
7253 7253 * Clear any flags related to volume
7254 7254 */
7255 7255 (void) mptsas_delete_volume(mpt, volhandle);
7256 7256
7257 7257 /*
7258 7258 * Update DR flag immediately avoid I/O failure
7259 7259 */
7260 7260 mutex_enter(&ptgt->m_tgt_intr_mutex);
7261 7261 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7262 7262 mutex_exit(&ptgt->m_tgt_intr_mutex);
7263 7263
7264 7264 topo_node = kmem_zalloc(
7265 7265 sizeof (mptsas_topo_change_list_t),
7266 7266 KM_SLEEP);
7267 7267 topo_node->mpt = mpt;
7268 7268 topo_node->un.phymask = ptgt->m_phymask;
7269 7269 topo_node->event =
7270 7270 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7271 7271 topo_node->devhdl = volhandle;
7272 7272 topo_node->flags =
7273 7273 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7274 7274 topo_node->object = (void *)ptgt;
7275 7275 if (topo_head == NULL) {
7276 7276 topo_head = topo_tail = topo_node;
7277 7277 } else {
7278 7278 topo_tail->next = topo_node;
7279 7279 topo_tail = topo_node;
7280 7280 }
7281 7281 break;
7282 7282 }
7283 7283 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7284 7284 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7285 7285 {
7286 7286 ptgt = mptsas_search_by_devhdl(tgttbl,
7287 7287 diskhandle);
7288 7288 if (ptgt == NULL)
7289 7289 break;
7290 7290
7291 7291 /*
7292 7292 * Update DR flag immediately avoid I/O failure
7293 7293 */
7294 7294 mutex_enter(&ptgt->m_tgt_intr_mutex);
7295 7295 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7296 7296 mutex_exit(&ptgt->m_tgt_intr_mutex);
7297 7297
7298 7298 topo_node = kmem_zalloc(
7299 7299 sizeof (mptsas_topo_change_list_t),
7300 7300 KM_SLEEP);
7301 7301 topo_node->mpt = mpt;
7302 7302 topo_node->un.phymask = ptgt->m_phymask;
7303 7303 topo_node->event =
7304 7304 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7305 7305 topo_node->devhdl = diskhandle;
7306 7306 topo_node->flags =
7307 7307 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7308 7308 topo_node->object = (void *)ptgt;
7309 7309 if (topo_head == NULL) {
7310 7310 topo_head = topo_tail = topo_node;
7311 7311 } else {
7312 7312 topo_tail->next = topo_node;
7313 7313 topo_tail = topo_node;
7314 7314 }
7315 7315 break;
7316 7316 }
7317 7317 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7318 7318 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7319 7319 {
7320 7320 /*
7321 7321 * The physical drive is released by a IR
7322 7322 * volume. But we cannot get the the physport
7323 7323 * or phynum from the event data, so we only
7324 7324 * can get the physport/phynum after SAS
7325 7325 * Device Page0 request for the devhdl.
7326 7326 */
7327 7327 topo_node = kmem_zalloc(
7328 7328 sizeof (mptsas_topo_change_list_t),
7329 7329 KM_SLEEP);
7330 7330 topo_node->mpt = mpt;
7331 7331 topo_node->un.phymask = 0;
7332 7332 topo_node->event =
7333 7333 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7334 7334 topo_node->devhdl = diskhandle;
7335 7335 topo_node->flags =
7336 7336 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7337 7337 topo_node->object = NULL;
7338 7338 mpt->m_port_chng = 1;
7339 7339 if (topo_head == NULL) {
7340 7340 topo_head = topo_tail = topo_node;
7341 7341 } else {
7342 7342 topo_tail->next = topo_node;
7343 7343 topo_tail = topo_node;
7344 7344 }
7345 7345 break;
7346 7346 }
7347 7347 default:
7348 7348 break;
7349 7349 }
7350 7350 }
7351 7351
7352 7352 if (topo_head != NULL) {
7353 7353 /*
7354 7354 * Launch DR taskq to handle topology change
7355 7355 */
7356 7356 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7357 7357 mptsas_handle_dr, (void *)topo_head,
7358 7358 DDI_NOSLEEP)) != DDI_SUCCESS) {
7359 7359 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7360 7360 "for handle SAS DR event failed. \n");
7361 7361 }
7362 7362 }
7363 7363 break;
7364 7364 }
7365 7365 default:
7366 7366 return (DDI_FAILURE);
7367 7367 }
7368 7368
7369 7369 return (DDI_SUCCESS);
7370 7370 }
7371 7371
7372 7372 /*
7373 7373 * handle events from ioc
7374 7374 */
7375 7375 static void
7376 7376 mptsas_handle_event(void *args)
7377 7377 {
7378 7378 m_replyh_arg_t *replyh_arg;
7379 7379 pMpi2EventNotificationReply_t eventreply;
7380 7380 uint32_t event, iocloginfo, rfm;
7381 7381 uint32_t status;
7382 7382 uint8_t port;
7383 7383 mptsas_t *mpt;
7384 7384 uint_t iocstatus;
7385 7385
7386 7386 replyh_arg = (m_replyh_arg_t *)args;
7387 7387 rfm = replyh_arg->rfm;
7388 7388 mpt = replyh_arg->mpt;
7389 7389
7390 7390 mutex_enter(&mpt->m_mutex);
7391 7391
7392 7392 eventreply = (pMpi2EventNotificationReply_t)
7393 7393 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7394 7394 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7395 7395
7396 7396 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7397 7397 &eventreply->IOCStatus)) {
7398 7398 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7399 7399 mptsas_log(mpt, CE_WARN,
7400 7400 "!mptsas_handle_event: IOCStatus=0x%x, "
7401 7401 "IOCLogInfo=0x%x", iocstatus,
7402 7402 ddi_get32(mpt->m_acc_reply_frame_hdl,
7403 7403 &eventreply->IOCLogInfo));
7404 7404 } else {
7405 7405 mptsas_log(mpt, CE_WARN,
7406 7406 "mptsas_handle_event: IOCStatus=0x%x, "
7407 7407 "IOCLogInfo=0x%x", iocstatus,
7408 7408 ddi_get32(mpt->m_acc_reply_frame_hdl,
7409 7409 &eventreply->IOCLogInfo));
7410 7410 }
7411 7411 }
7412 7412
7413 7413 /*
7414 7414 * figure out what kind of event we got and handle accordingly
7415 7415 */
7416 7416 switch (event) {
7417 7417 case MPI2_EVENT_LOG_ENTRY_ADDED:
7418 7418 break;
7419 7419 case MPI2_EVENT_LOG_DATA:
7420 7420 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7421 7421 &eventreply->IOCLogInfo);
7422 7422 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7423 7423 iocloginfo));
7424 7424 break;
7425 7425 case MPI2_EVENT_STATE_CHANGE:
7426 7426 NDBG20(("mptsas%d state change.", mpt->m_instance));
7427 7427 break;
7428 7428 case MPI2_EVENT_HARD_RESET_RECEIVED:
7429 7429 NDBG20(("mptsas%d event change.", mpt->m_instance));
7430 7430 break;
7431 7431 case MPI2_EVENT_SAS_DISCOVERY:
7432 7432 {
7433 7433 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7434 7434 char string[80];
7435 7435 uint8_t rc;
7436 7436
7437 7437 sasdiscovery =
7438 7438 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7439 7439
7440 7440 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7441 7441 &sasdiscovery->ReasonCode);
7442 7442 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7443 7443 &sasdiscovery->PhysicalPort);
7444 7444 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7445 7445 &sasdiscovery->DiscoveryStatus);
7446 7446
7447 7447 string[0] = 0;
7448 7448 switch (rc) {
7449 7449 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7450 7450 (void) sprintf(string, "STARTING");
7451 7451 break;
7452 7452 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7453 7453 (void) sprintf(string, "COMPLETED");
7454 7454 break;
7455 7455 default:
7456 7456 (void) sprintf(string, "UNKNOWN");
7457 7457 break;
7458 7458 }
7459 7459
7460 7460 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7461 7461 port, status));
7462 7462
7463 7463 break;
7464 7464 }
7465 7465 case MPI2_EVENT_EVENT_CHANGE:
7466 7466 NDBG20(("mptsas%d event change.", mpt->m_instance));
7467 7467 break;
7468 7468 case MPI2_EVENT_TASK_SET_FULL:
7469 7469 {
7470 7470 pMpi2EventDataTaskSetFull_t taskfull;
7471 7471
7472 7472 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7473 7473
7474 7474 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7475 7475 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7476 7476 &taskfull->CurrentDepth)));
7477 7477 break;
7478 7478 }
7479 7479 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7480 7480 {
7481 7481 /*
7482 7482 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7483 7483 * in mptsas_handle_event_sync() of interrupt context
7484 7484 */
7485 7485 break;
7486 7486 }
7487 7487 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7488 7488 {
7489 7489 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7490 7490 uint8_t rc;
7491 7491 char string[80];
7492 7492
7493 7493 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7494 7494 eventreply->EventData;
7495 7495
7496 7496 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7497 7497 &encstatus->ReasonCode);
7498 7498 switch (rc) {
7499 7499 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7500 7500 (void) sprintf(string, "added");
7501 7501 break;
7502 7502 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7503 7503 (void) sprintf(string, ", not responding");
7504 7504 break;
7505 7505 default:
7506 7506 break;
7507 7507 }
7508 7508 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7509 7509 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7510 7510 &encstatus->EnclosureHandle), string));
7511 7511 break;
7512 7512 }
7513 7513
7514 7514 /*
7515 7515 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7516 7516 * mptsas_handle_event_sync,in here just send ack message.
7517 7517 */
7518 7518 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7519 7519 {
7520 7520 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7521 7521 uint8_t rc;
7522 7522 uint16_t devhdl;
7523 7523 uint64_t wwn = 0;
7524 7524 uint32_t wwn_lo, wwn_hi;
7525 7525
7526 7526 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7527 7527 eventreply->EventData;
7528 7528 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7529 7529 &statuschange->ReasonCode);
7530 7530 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7531 7531 (uint32_t *)(void *)&statuschange->SASAddress);
7532 7532 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7533 7533 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7534 7534 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7535 7535 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7536 7536 &statuschange->DevHandle);
7537 7537
7538 7538 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7539 7539 wwn));
7540 7540
7541 7541 switch (rc) {
7542 7542 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7543 7543 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7544 7544 ddi_get8(mpt->m_acc_reply_frame_hdl,
7545 7545 &statuschange->ASC),
7546 7546 ddi_get8(mpt->m_acc_reply_frame_hdl,
7547 7547 &statuschange->ASCQ)));
7548 7548 break;
7549 7549
7550 7550 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7551 7551 NDBG20(("Device not supported"));
7552 7552 break;
7553 7553
7554 7554 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7555 7555 NDBG20(("IOC internally generated the Target Reset "
7556 7556 "for devhdl:%x", devhdl));
7557 7557 break;
7558 7558
7559 7559 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7560 7560 NDBG20(("IOC's internally generated Target Reset "
7561 7561 "completed for devhdl:%x", devhdl));
7562 7562 break;
7563 7563
7564 7564 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7565 7565 NDBG20(("IOC internally generated Abort Task"));
7566 7566 break;
7567 7567
7568 7568 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7569 7569 NDBG20(("IOC's internally generated Abort Task "
7570 7570 "completed"));
7571 7571 break;
7572 7572
7573 7573 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7574 7574 NDBG20(("IOC internally generated Abort Task Set"));
7575 7575 break;
7576 7576
7577 7577 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7578 7578 NDBG20(("IOC internally generated Clear Task Set"));
7579 7579 break;
7580 7580
7581 7581 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7582 7582 NDBG20(("IOC internally generated Query Task"));
7583 7583 break;
7584 7584
7585 7585 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7586 7586 NDBG20(("Device sent an Asynchronous Notification"));
7587 7587 break;
7588 7588
7589 7589 default:
7590 7590 break;
7591 7591 }
7592 7592 break;
7593 7593 }
7594 7594 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7595 7595 {
7596 7596 /*
7597 7597 * IR TOPOLOGY CHANGE LIST Event has already been handled
7598 7598 * in mpt_handle_event_sync() of interrupt context
7599 7599 */
7600 7600 break;
7601 7601 }
7602 7602 case MPI2_EVENT_IR_OPERATION_STATUS:
7603 7603 {
7604 7604 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7605 7605 char reason_str[80];
7606 7606 uint8_t rc, percent;
7607 7607 uint16_t handle;
7608 7608
7609 7609 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7610 7610 eventreply->EventData;
7611 7611 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7612 7612 &irOpStatus->RAIDOperation);
7613 7613 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7614 7614 &irOpStatus->PercentComplete);
7615 7615 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7616 7616 &irOpStatus->VolDevHandle);
7617 7617
7618 7618 switch (rc) {
7619 7619 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7620 7620 (void) sprintf(reason_str, "resync");
7621 7621 break;
7622 7622 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7623 7623 (void) sprintf(reason_str, "online capacity "
7624 7624 "expansion");
7625 7625 break;
7626 7626 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7627 7627 (void) sprintf(reason_str, "consistency check");
7628 7628 break;
7629 7629 default:
7630 7630 (void) sprintf(reason_str, "unknown reason %x",
7631 7631 rc);
7632 7632 }
7633 7633
7634 7634 NDBG20(("mptsas%d raid operational status: (%s)"
7635 7635 "\thandle(0x%04x), percent complete(%d)\n",
7636 7636 mpt->m_instance, reason_str, handle, percent));
7637 7637 break;
7638 7638 }
7639 7639 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7640 7640 {
7641 7641 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7642 7642 uint8_t phy_num;
7643 7643 uint8_t primitive;
7644 7644
7645 7645 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7646 7646 eventreply->EventData;
7647 7647
7648 7648 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7649 7649 &sas_broadcast->PhyNum);
7650 7650 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7651 7651 &sas_broadcast->Primitive);
7652 7652
7653 7653 switch (primitive) {
7654 7654 case MPI2_EVENT_PRIMITIVE_CHANGE:
7655 7655 mptsas_smhba_log_sysevent(mpt,
7656 7656 ESC_SAS_HBA_PORT_BROADCAST,
7657 7657 SAS_PORT_BROADCAST_CHANGE,
7658 7658 &mpt->m_phy_info[phy_num].smhba_info);
7659 7659 break;
7660 7660 case MPI2_EVENT_PRIMITIVE_SES:
7661 7661 mptsas_smhba_log_sysevent(mpt,
7662 7662 ESC_SAS_HBA_PORT_BROADCAST,
7663 7663 SAS_PORT_BROADCAST_SES,
7664 7664 &mpt->m_phy_info[phy_num].smhba_info);
7665 7665 break;
7666 7666 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7667 7667 mptsas_smhba_log_sysevent(mpt,
7668 7668 ESC_SAS_HBA_PORT_BROADCAST,
7669 7669 SAS_PORT_BROADCAST_D01_4,
7670 7670 &mpt->m_phy_info[phy_num].smhba_info);
7671 7671 break;
7672 7672 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7673 7673 mptsas_smhba_log_sysevent(mpt,
7674 7674 ESC_SAS_HBA_PORT_BROADCAST,
7675 7675 SAS_PORT_BROADCAST_D04_7,
7676 7676 &mpt->m_phy_info[phy_num].smhba_info);
7677 7677 break;
7678 7678 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7679 7679 mptsas_smhba_log_sysevent(mpt,
7680 7680 ESC_SAS_HBA_PORT_BROADCAST,
7681 7681 SAS_PORT_BROADCAST_D16_7,
7682 7682 &mpt->m_phy_info[phy_num].smhba_info);
7683 7683 break;
7684 7684 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7685 7685 mptsas_smhba_log_sysevent(mpt,
7686 7686 ESC_SAS_HBA_PORT_BROADCAST,
7687 7687 SAS_PORT_BROADCAST_D29_7,
7688 7688 &mpt->m_phy_info[phy_num].smhba_info);
7689 7689 break;
7690 7690 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7691 7691 mptsas_smhba_log_sysevent(mpt,
7692 7692 ESC_SAS_HBA_PORT_BROADCAST,
7693 7693 SAS_PORT_BROADCAST_D24_0,
7694 7694 &mpt->m_phy_info[phy_num].smhba_info);
7695 7695 break;
7696 7696 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7697 7697 mptsas_smhba_log_sysevent(mpt,
7698 7698 ESC_SAS_HBA_PORT_BROADCAST,
7699 7699 SAS_PORT_BROADCAST_D27_4,
7700 7700 &mpt->m_phy_info[phy_num].smhba_info);
7701 7701 break;
7702 7702 default:
7703 7703 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7704 7704 " %x received",
7705 7705 mpt->m_instance, primitive));
7706 7706 break;
7707 7707 }
7708 7708 NDBG20(("mptsas%d sas broadcast primitive: "
7709 7709 "\tprimitive(0x%04x), phy(%d) complete\n",
7710 7710 mpt->m_instance, primitive, phy_num));
7711 7711 break;
7712 7712 }
7713 7713 case MPI2_EVENT_IR_VOLUME:
7714 7714 {
7715 7715 Mpi2EventDataIrVolume_t *irVolume;
7716 7716 uint16_t devhandle;
7717 7717 uint32_t state;
7718 7718 int config, vol;
7719 7719 mptsas_slots_t *slots = mpt->m_active;
7720 7720 uint8_t found = FALSE;
7721 7721
7722 7722 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7723 7723 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7724 7724 &irVolume->NewValue);
7725 7725 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7726 7726 &irVolume->VolDevHandle);
7727 7727
7728 7728 NDBG20(("EVENT_IR_VOLUME event is received"));
7729 7729
7730 7730 /*
7731 7731 * Get latest RAID info and then find the DevHandle for this
7732 7732 * event in the configuration. If the DevHandle is not found
7733 7733 * just exit the event.
7734 7734 */
7735 7735 (void) mptsas_get_raid_info(mpt);
7736 7736 for (config = 0; (config < slots->m_num_raid_configs) &&
7737 7737 (!found); config++) {
7738 7738 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7739 7739 if (slots->m_raidconfig[config].m_raidvol[vol].
7740 7740 m_raidhandle == devhandle) {
7741 7741 found = TRUE;
7742 7742 break;
7743 7743 }
7744 7744 }
7745 7745 }
7746 7746 if (!found) {
7747 7747 break;
7748 7748 }
7749 7749
7750 7750 switch (irVolume->ReasonCode) {
7751 7751 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7752 7752 {
7753 7753 uint32_t i;
7754 7754 slots->m_raidconfig[config].m_raidvol[vol].m_settings =
7755 7755 state;
7756 7756
7757 7757 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7758 7758 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7759 7759 ", auto-config of hot-swap drives is %s"
7760 7760 ", write caching is %s"
7761 7761 ", hot-spare pool mask is %02x\n",
7762 7762 vol, state &
7763 7763 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7764 7764 ? "disabled" : "enabled",
7765 7765 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7766 7766 ? "controlled by member disks" :
7767 7767 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7768 7768 ? "disabled" :
7769 7769 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7770 7770 ? "enabled" :
7771 7771 "incorrectly set",
7772 7772 (state >> 16) & 0xff);
7773 7773 break;
7774 7774 }
7775 7775 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7776 7776 {
7777 7777 slots->m_raidconfig[config].m_raidvol[vol].m_state =
7778 7778 (uint8_t)state;
7779 7779
7780 7780 mptsas_log(mpt, CE_NOTE,
7781 7781 "Volume %d is now %s\n", vol,
7782 7782 state == MPI2_RAID_VOL_STATE_OPTIMAL
7783 7783 ? "optimal" :
7784 7784 state == MPI2_RAID_VOL_STATE_DEGRADED
7785 7785 ? "degraded" :
7786 7786 state == MPI2_RAID_VOL_STATE_ONLINE
7787 7787 ? "online" :
7788 7788 state == MPI2_RAID_VOL_STATE_INITIALIZING
7789 7789 ? "initializing" :
7790 7790 state == MPI2_RAID_VOL_STATE_FAILED
7791 7791 ? "failed" :
7792 7792 state == MPI2_RAID_VOL_STATE_MISSING
7793 7793 ? "missing" :
7794 7794 "state unknown");
7795 7795 break;
7796 7796 }
7797 7797 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7798 7798 {
7799 7799 slots->m_raidconfig[config].m_raidvol[vol].
7800 7800 m_statusflags = state;
7801 7801
7802 7802 mptsas_log(mpt, CE_NOTE,
7803 7803 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7804 7804 vol,
7805 7805 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7806 7806 ? ", enabled" : ", disabled",
7807 7807 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7808 7808 ? ", quiesced" : "",
7809 7809 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7810 7810 ? ", inactive" : ", active",
7811 7811 state &
7812 7812 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7813 7813 ? ", bad block table is full" : "",
7814 7814 state &
7815 7815 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7816 7816 ? ", resync in progress" : "",
7817 7817 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7818 7818 ? ", background initialization in progress" : "",
7819 7819 state &
7820 7820 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7821 7821 ? ", capacity expansion in progress" : "",
7822 7822 state &
7823 7823 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7824 7824 ? ", consistency check in progress" : "",
7825 7825 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7826 7826 ? ", data scrub in progress" : "");
7827 7827 break;
7828 7828 }
7829 7829 default:
7830 7830 break;
7831 7831 }
7832 7832 break;
7833 7833 }
7834 7834 case MPI2_EVENT_IR_PHYSICAL_DISK:
7835 7835 {
7836 7836 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7837 7837 uint16_t devhandle, enchandle, slot;
7838 7838 uint32_t status, state;
7839 7839 uint8_t physdisknum, reason;
7840 7840
7841 7841 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7842 7842 eventreply->EventData;
7843 7843 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7844 7844 &irPhysDisk->PhysDiskNum);
7845 7845 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7846 7846 &irPhysDisk->PhysDiskDevHandle);
7847 7847 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7848 7848 &irPhysDisk->EnclosureHandle);
7849 7849 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7850 7850 &irPhysDisk->Slot);
7851 7851 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7852 7852 &irPhysDisk->NewValue);
7853 7853 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7854 7854 &irPhysDisk->ReasonCode);
7855 7855
7856 7856 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7857 7857
7858 7858 switch (reason) {
7859 7859 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7860 7860 mptsas_log(mpt, CE_NOTE,
7861 7861 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7862 7862 "for enclosure with handle 0x%x is now in hot "
7863 7863 "spare pool %d",
7864 7864 physdisknum, devhandle, slot, enchandle,
7865 7865 (state >> 16) & 0xff);
7866 7866 break;
7867 7867
7868 7868 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7869 7869 status = state;
7870 7870 mptsas_log(mpt, CE_NOTE,
7871 7871 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7872 7872 "for enclosure with handle 0x%x is now "
7873 7873 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7874 7874 enchandle,
7875 7875 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7876 7876 ? ", inactive" : ", active",
7877 7877 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7878 7878 ? ", out of sync" : "",
7879 7879 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7880 7880 ? ", quiesced" : "",
7881 7881 status &
7882 7882 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7883 7883 ? ", write cache enabled" : "",
7884 7884 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7885 7885 ? ", capacity expansion target" : "");
7886 7886 break;
7887 7887
7888 7888 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7889 7889 mptsas_log(mpt, CE_NOTE,
7890 7890 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7891 7891 "for enclosure with handle 0x%x is now %s\n",
7892 7892 physdisknum, devhandle, slot, enchandle,
7893 7893 state == MPI2_RAID_PD_STATE_OPTIMAL
7894 7894 ? "optimal" :
7895 7895 state == MPI2_RAID_PD_STATE_REBUILDING
7896 7896 ? "rebuilding" :
7897 7897 state == MPI2_RAID_PD_STATE_DEGRADED
7898 7898 ? "degraded" :
7899 7899 state == MPI2_RAID_PD_STATE_HOT_SPARE
7900 7900 ? "a hot spare" :
7901 7901 state == MPI2_RAID_PD_STATE_ONLINE
7902 7902 ? "online" :
7903 7903 state == MPI2_RAID_PD_STATE_OFFLINE
7904 7904 ? "offline" :
7905 7905 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7906 7906 ? "not compatible" :
7907 7907 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7908 7908 ? "not configured" :
7909 7909 "state unknown");
7910 7910 break;
7911 7911 }
7912 7912 break;
7913 7913 }
7914 7914 default:
7915 7915 NDBG20(("mptsas%d: unknown event %x received",
7916 7916 mpt->m_instance, event));
7917 7917 break;
7918 7918 }
7919 7919
7920 7920 /*
7921 7921 * Return the reply frame to the free queue.
7922 7922 */
7923 7923 ddi_put32(mpt->m_acc_free_queue_hdl,
7924 7924 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7925 7925 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7926 7926 DDI_DMA_SYNC_FORDEV);
7927 7927 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7928 7928 mpt->m_free_index = 0;
7929 7929 }
7930 7930 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7931 7931 mpt->m_free_index);
7932 7932 mutex_exit(&mpt->m_mutex);
7933 7933 }
7934 7934
7935 7935 /*
7936 7936 * invoked from timeout() to restart qfull cmds with throttle == 0
7937 7937 */
7938 7938 static void
7939 7939 mptsas_restart_cmd(void *arg)
7940 7940 {
7941 7941 mptsas_t *mpt = arg;
7942 7942 mptsas_target_t *ptgt = NULL;
7943 7943
7944 7944 mutex_enter(&mpt->m_mutex);
7945 7945
7946 7946 mpt->m_restart_cmd_timeid = 0;
7947 7947
7948 7948 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7949 7949 MPTSAS_HASH_FIRST);
7950 7950 while (ptgt != NULL) {
7951 7951 mutex_enter(&ptgt->m_tgt_intr_mutex);
7952 7952 if (ptgt->m_reset_delay == 0) {
7953 7953 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7954 7954 mptsas_set_throttle(mpt, ptgt,
7955 7955 MAX_THROTTLE);
7956 7956 }
7957 7957 }
7958 7958 mutex_exit(&ptgt->m_tgt_intr_mutex);
7959 7959
7960 7960 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7961 7961 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7962 7962 }
7963 7963 mptsas_restart_hba(mpt);
7964 7964 mutex_exit(&mpt->m_mutex);
7965 7965 }
7966 7966
7967 7967 /*
7968 7968 * mptsas_remove_cmd0 is similar to mptsas_remove_cmd except that it is called
7969 7969 * where m_intr_mutex has already been held.
7970 7970 */
/*
 * Remove a command from the active slot table and release its slot.
 * Must be called with m_mutex held; takes m_intr_mutex internally.
 */
void
mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Under the fine-grained lock scheme, an outstanding cmd is only
	 * linked into m_active just before the DMA is triggered
	 * (MPTSAS_START_CMD); mptsas_save_cmd() no longer links it.  So by
	 * the time mptsas_remove_cmd() runs, mptsas_save_cmd() must have
	 * been called, but the cmd may not yet be linked.
	 * mptsas_remove_cmd0() requires the cmd to be linked, so link it
	 * into the outstanding cmd list here to keep the same semantics.
	 */
	mpt->m_active->m_slot[cmd->cmd_slot] = cmd;

	/* m_intr_mutex protects the slot free queues and removal path. */
	mutex_enter(&mpt->m_intr_mutex);
	mptsas_remove_cmd0(mpt, cmd);
	mutex_exit(&mpt->m_intr_mutex);
}
7992 7992
/*
 * Core of command removal: take the completed cmd out of the active-slot
 * table, return its slot entry to the per-CPU release free queue, and
 * update per-target accounting (ncmds, throttle, tag-queue timeout).
 *
 * Called with m_intr_mutex held (from mptsas_remove_cmd) or from the ISR
 * fast path; per-target fields are protected by m_tgt_intr_mutex, and the
 * free-queue manipulation by the queue's own m_fq_mutex.
 */
static inline void
mptsas_remove_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int slot;
	mptsas_slots_t *slots = mpt->m_active;
	int t;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
	mptsas_slot_free_e_t *pe;

	ASSERT(cmd != NULL);
	ASSERT(cmd->cmd_queued == FALSE);

	/*
	 * Task Management cmds are removed in their own routines.  Also,
	 * we don't want to modify timeout based on TM cmds.
	 */
	if (cmd->cmd_flags & CFLAG_TM_CMD) {
		return;
	}

	t = Tgt(cmd);
	slot = cmd->cmd_slot;
	/* Free-list entry for this slot; entries are indexed from slot 1. */
	pe = mpt->m_slot_free_ae + slot - 1;
	ASSERT(cmd == slots->m_slot[slot]);
	/* Slot 0 (invalid SMID) and the last slot (TM) are reserved. */
	ASSERT((slot > 0) && slot < (mpt->m_max_requests - 1));

	/*
	 * remove the cmd.
	 *
	 * Clear the active-slot pointer and push the slot entry onto the
	 * release queue of the CPU that allocated it, under that queue's
	 * own mutex.
	 */
	mutex_enter(&mpt->m_slot_freeq_pairp[pe->cpuid].
	    m_slot_releq.s.m_fq_mutex);
	NDBG31(("mptsas_remove_cmd0: removing cmd=0x%p", (void *)cmd));
	slots->m_slot[slot] = NULL;
	ASSERT(pe->slot == slot);
	list_insert_tail(&mpt->m_slot_freeq_pairp[pe->cpuid].
	    m_slot_releq.s.m_fq_list, pe);
	mpt->m_slot_freeq_pairp[pe->cpuid].m_slot_releq.s.m_fq_n++;
	/* Queue depth can never exceed the number of usable slots. */
	ASSERT(mpt->m_slot_freeq_pairp[pe->cpuid].
	    m_slot_releq.s.m_fq_n <= mpt->m_max_requests - 2);
	mutex_exit(&mpt->m_slot_freeq_pairp[pe->cpuid].
	    m_slot_releq.s.m_fq_mutex);

	/*
	 * only decrement per target ncmds if command
	 * has a target associated with it.
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		ptgt->m_t_ncmds--;
		/*
		 * reset throttle if we just ran an untagged command
		 * to a tagged target
		 */
		if ((ptgt->m_t_ncmds == 0) &&
		    ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);
	}

	/*
	 * This is all we need to do for ioc commands.
	 * The ioc cmds would never be handled in fastpath in ISR, so we make
	 * sure the mptsas_return_to_pool() would always be called with
	 * m_mutex protected.
	 */
	if (cmd->cmd_flags & CFLAG_CMDIOC) {
		ASSERT(mutex_owned(&mpt->m_mutex));
		mptsas_return_to_pool(mpt, cmd);
		return;
	}

	/*
	 * Figure out what to set tag Q timeout for...
	 *
	 * Optimize: If we have duplicate's of same timeout
	 * we're using, then we'll use it again until we run
	 * out of duplicates.  This should be the normal case
	 * for block and raw I/O.
	 * If no duplicates, we have to scan through tag que and
	 * find the longest timeout value and use it.  This is
	 * going to take a while...
	 * Add 1 to m_n_slots to account for TM request.
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
		if (--(ptgt->m_dups) == 0) {
			if (ptgt->m_t_ncmds) {
				mptsas_cmd_t *ssp;
				uint_t n = 0;
				ushort_t nslots = (slots->m_n_slots + 1);
				ushort_t i;
				/*
				 * This crude check assumes we don't do
				 * this too often which seems reasonable
				 * for block and raw I/O.
				 */
				for (i = 0; i < nslots; i++) {
					ssp = slots->m_slot[i];
					if (ssp && (Tgt(ssp) == t) &&
					    (ssp->cmd_pkt->pkt_time > n)) {
						/* New maximum found. */
						n = ssp->cmd_pkt->pkt_time;
						ptgt->m_dups = 1;
					} else if (ssp && (Tgt(ssp) == t) &&
					    (ssp->cmd_pkt->pkt_time == n)) {
						ptgt->m_dups++;
					}
				}
				ptgt->m_timebase = n;
			} else {
				/* No outstanding cmds: clear timing state. */
				ptgt->m_dups = 0;
				ptgt->m_timebase = 0;
			}
		}
	}
	ptgt->m_timeout = ptgt->m_timebase;

	/* The slot must already have been cleared above. */
	ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
	mutex_exit(&ptgt->m_tgt_intr_mutex);
}
8113 8113
8114 8114 /*
8115 8115 * start a fresh request from the top of the device queue.
8116 8116 */
8117 8117 static void
8118 8118 mptsas_restart_hba(mptsas_t *mpt)
8119 8119 {
8120 8120 mptsas_cmd_t *cmd, *next_cmd;
8121 8121 mptsas_target_t *ptgt = NULL;
8122 8122
8123 8123 NDBG1(("mptsas_restart_hba: mpt=0x%p", (void *)mpt));
8124 8124
8125 8125 ASSERT(mutex_owned(&mpt->m_mutex));
8126 8126
8127 8127 /*
8128 8128 * If there is a reset delay, don't start any cmds. Otherwise, start
8129 8129 * as many cmds as possible.
8130 8130 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
8131 8131 * commands is m_max_requests - 2.
8132 8132 */
8133 8133 cmd = mpt->m_waitq;
8134 8134
8135 8135 while (cmd != NULL) {
8136 8136 next_cmd = cmd->cmd_linkp;
8137 8137 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
8138 8138 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8139 8139 /*
8140 8140 * passthru command get slot need
8141 8141 * set CFLAG_PREPARED.
8142 8142 */
8143 8143 cmd->cmd_flags |= CFLAG_PREPARED;
8144 8144 mptsas_waitq_delete(mpt, cmd);
8145 8145 mptsas_start_passthru(mpt, cmd);
8146 8146 }
8147 8147 cmd = next_cmd;
8148 8148 continue;
8149 8149 }
8150 8150 if (cmd->cmd_flags & CFLAG_CONFIG) {
8151 8151 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8152 8152 /*
8153 8153 * Send the config page request and delete it
8154 8154 * from the waitq.
8155 8155 */
8156 8156 cmd->cmd_flags |= CFLAG_PREPARED;
8157 8157 mptsas_waitq_delete(mpt, cmd);
8158 8158 mptsas_start_config_page_access(mpt, cmd);
8159 8159 }
8160 8160 cmd = next_cmd;
8161 8161 continue;
8162 8162 }
8163 8163 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
8164 8164 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8165 8165 /*
8166 8166 * Send the FW Diag request and delete if from
8167 8167 * the waitq.
8168 8168 */
8169 8169 cmd->cmd_flags |= CFLAG_PREPARED;
8170 8170 mptsas_waitq_delete(mpt, cmd);
8171 8171 mptsas_start_diag(mpt, cmd);
8172 8172 }
8173 8173 cmd = next_cmd;
8174 8174 continue;
8175 8175 }
8176 8176
8177 8177 ptgt = cmd->cmd_tgt_addr;
8178 8178 if (ptgt) {
8179 8179 mutex_enter(&mpt->m_intr_mutex);
8180 8180 mutex_enter(&ptgt->m_tgt_intr_mutex);
8181 8181 if ((ptgt->m_t_throttle == DRAIN_THROTTLE) &&
8182 8182 (ptgt->m_t_ncmds == 0)) {
8183 8183 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8184 8184 }
8185 8185 if ((ptgt->m_reset_delay == 0) &&
8186 8186 (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
8187 8187 mutex_exit(&ptgt->m_tgt_intr_mutex);
8188 8188 mutex_exit(&mpt->m_intr_mutex);
8189 8189 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8190 8190 mptsas_waitq_delete(mpt, cmd);
8191 8191 (void) mptsas_start_cmd(mpt, cmd);
8192 8192 }
8193 8193 goto out;
8194 8194 }
8195 8195 mutex_exit(&ptgt->m_tgt_intr_mutex);
8196 8196 mutex_exit(&mpt->m_intr_mutex);
8197 8197 }
8198 8198 out:
8199 8199 cmd = next_cmd;
8200 8200 }
8201 8201 }
8202 8202
8203 8203 /*
8204 8204 * mpt tag type lookup
8205 8205 */
8206 8206 static char mptsas_tag_lookup[] =
8207 8207 {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8208 8208
8209 8209 /*
8210 8210 * mptsas_start_cmd0 is similar to mptsas_start_cmd, except that, it is called
8211 8211 * without ANY mutex protected, while, mptsas_start_cmd is called with m_mutex
8212 8212 * protected.
8213 8213 *
8214 8214 * the relevant field in ptgt should be protected by m_tgt_intr_mutex in both
8215 8215 * functions.
8216 8216 *
8217 8217 * before the cmds are linked on the slot for monitor as outstanding cmds, they
8218 8218 * are accessed as slab objects, so slab framework ensures the exclusive access,
8219 8219 * and no other mutex is requireed. Linking for monitor and the trigger of dma
8220 8220 * must be done exclusively.
8221 8221 */
8222 8222 static int
8223 8223 mptsas_start_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd)
8224 8224 {
8225 8225 struct scsi_pkt *pkt = CMD2PKT(cmd);
8226 8226 uint32_t control = 0;
8227 8227 int n;
8228 8228 caddr_t mem;
8229 8229 pMpi2SCSIIORequest_t io_request;
8230 8230 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
8231 8231 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
8232 8232 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8233 8233 uint16_t SMID, io_flags = 0;
8234 8234 uint32_t request_desc_low, request_desc_high;
8235 8235
8236 8236 NDBG1(("mptsas_start_cmd0: cmd=0x%p", (void *)cmd));
8237 8237
8238 8238 /*
8239 8239 * Set SMID and increment index. Rollover to 1 instead of 0 if index
8240 8240 * is at the max. 0 is an invalid SMID, so we call the first index 1.
8241 8241 */
8242 8242 SMID = cmd->cmd_slot;
8243 8243
8244 8244 /*
8245 8245 * It is possible for back to back device reset to
8246 8246 * happen before the reset delay has expired. That's
8247 8247 * ok, just let the device reset go out on the bus.
8248 8248 */
8249 8249 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8250 8250 ASSERT(ptgt->m_reset_delay == 0);
8251 8251 }
8252 8252
8253 8253 /*
8254 8254 * if a non-tagged cmd is submitted to an active tagged target
8255 8255 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
8256 8256 * to be untagged
8257 8257 */
8258 8258 mutex_enter(&ptgt->m_tgt_intr_mutex);
8259 8259 if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
8260 8260 (ptgt->m_t_ncmds > 1) &&
8261 8261 ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
8262 8262 (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
8263 8263 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8264 8264 NDBG23(("target=%d, untagged cmd, start draining\n",
8265 8265 ptgt->m_devhdl));
8266 8266
8267 8267 if (ptgt->m_reset_delay == 0) {
8268 8268 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
8269 8269 }
8270 8270 mutex_exit(&ptgt->m_tgt_intr_mutex);
8271 8271
8272 8272 mutex_enter(&mpt->m_mutex);
8273 8273 mptsas_remove_cmd(mpt, cmd);
8274 8274 cmd->cmd_pkt_flags |= FLAG_HEAD;
8275 8275 mptsas_waitq_add(mpt, cmd);
8276 8276 mutex_exit(&mpt->m_mutex);
8277 8277 return (DDI_FAILURE);
8278 8278 }
8279 8279 mutex_exit(&ptgt->m_tgt_intr_mutex);
8280 8280 return (DDI_FAILURE);
8281 8281 }
8282 8282 mutex_exit(&ptgt->m_tgt_intr_mutex);
8283 8283
8284 8284 /*
8285 8285 * Set correct tag bits.
8286 8286 */
8287 8287 if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
8288 8288 switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
8289 8289 FLAG_TAGMASK) >> 12)]) {
8290 8290 case MSG_SIMPLE_QTAG:
8291 8291 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8292 8292 break;
8293 8293 case MSG_HEAD_QTAG:
8294 8294 control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
8295 8295 break;
8296 8296 case MSG_ORDERED_QTAG:
8297 8297 control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
8298 8298 break;
8299 8299 default:
8300 8300 mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
8301 8301 break;
8302 8302 }
8303 8303 } else {
8304 8304 if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
8305 8305 ptgt->m_t_throttle = 1;
8306 8306 }
8307 8307 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8308 8308 }
8309 8309
8310 8310 if (cmd->cmd_pkt_flags & FLAG_TLR) {
8311 8311 control |= MPI2_SCSIIO_CONTROL_TLR_ON;
8312 8312 }
8313 8313
8314 8314 mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
8315 8315 io_request = (pMpi2SCSIIORequest_t)mem;
8316 8316
8317 8317 bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
8318 8318 ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
8319 8319 (MPI2_SCSI_IO_REQUEST, SGL) / 4);
8320 8320 mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
8321 8321 MPI2_FUNCTION_SCSI_IO_REQUEST);
8322 8322
8323 8323 (void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
8324 8324 io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);
8325 8325
8326 8326 io_flags = cmd->cmd_cdblen;
8327 8327 ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
8328 8328 /*
8329 8329 * setup the Scatter/Gather DMA list for this request
8330 8330 */
8331 8331 if (cmd->cmd_cookiec > 0) {
8332 8332 mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
8333 8333 } else {
8334 8334 ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
8335 8335 ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
8336 8336 MPI2_SGE_FLAGS_END_OF_BUFFER |
8337 8337 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
8338 8338 MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
8339 8339 }
8340 8340
8341 8341 /*
8342 8342 * save ARQ information
8343 8343 */
8344 8344 ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
8345 8345 if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
8346 8346 (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
8347 8347 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8348 8348 cmd->cmd_ext_arqcookie.dmac_address);
8349 8349 } else {
8350 8350 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8351 8351 cmd->cmd_arqcookie.dmac_address);
8352 8352 }
8353 8353
8354 8354 ddi_put32(acc_hdl, &io_request->Control, control);
8355 8355
8356 8356 NDBG31(("starting message=0x%p, with cmd=0x%p",
8357 8357 (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));
8358 8358
8359 8359 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8360 8360
8361 8361 /*
8362 8362 * Build request descriptor and write it to the request desc post reg.
8363 8363 */
8364 8364 request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
8365 8365 request_desc_high = ptgt->m_devhdl << 16;
8366 8366
8367 8367 mutex_enter(&mpt->m_mutex);
8368 8368 mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
8369 8369 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
8370 8370 mutex_exit(&mpt->m_mutex);
8371 8371
8372 8372 /*
8373 8373 * Start timeout.
8374 8374 */
8375 8375 mutex_enter(&ptgt->m_tgt_intr_mutex);
8376 8376 #ifdef MPTSAS_TEST
8377 8377 /*
8378 8378 * Temporarily set timebase = 0; needed for
8379 8379 * timeout torture test.
8380 8380 */
8381 8381 if (mptsas_test_timeouts) {
8382 8382 ptgt->m_timebase = 0;
8383 8383 }
8384 8384 #endif
8385 8385 n = pkt->pkt_time - ptgt->m_timebase;
8386 8386
8387 8387 if (n == 0) {
8388 8388 (ptgt->m_dups)++;
8389 8389 ptgt->m_timeout = ptgt->m_timebase;
8390 8390 } else if (n > 0) {
8391 8391 ptgt->m_timeout =
8392 8392 ptgt->m_timebase = pkt->pkt_time;
8393 8393 ptgt->m_dups = 1;
8394 8394 } else if (n < 0) {
8395 8395 ptgt->m_timeout = ptgt->m_timebase;
8396 8396 }
8397 8397 #ifdef MPTSAS_TEST
8398 8398 /*
8399 8399 * Set back to a number higher than
8400 8400 * mptsas_scsi_watchdog_tick
8401 8401 * so timeouts will happen in mptsas_watchsubr
8402 8402 */
8403 8403 if (mptsas_test_timeouts) {
8404 8404 ptgt->m_timebase = 60;
8405 8405 }
8406 8406 #endif
8407 8407 mutex_exit(&ptgt->m_tgt_intr_mutex);
8408 8408
8409 8409 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8410 8410 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8411 8411 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8412 8412 return (DDI_FAILURE);
8413 8413 }
8414 8414 return (DDI_SUCCESS);
8415 8415 }
8416 8416
/*
 * Build and post a SCSI IO request for cmd to the controller.
 *
 * Called with m_mutex held (contrast with mptsas_start_cmd0, which is
 * called lock-free and takes m_mutex only around slot linking/posting).
 * Per-target state is protected by m_tgt_intr_mutex.
 *
 * Returns DDI_SUCCESS when the request was posted, DDI_FAILURE when the
 * cmd was requeued for draining or an FMA handle check failed.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	int			n;
	caddr_t			mem;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint32_t		request_desc_low, request_desc_high;

	NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));

	/*
	 * Set SMID and increment index.  Rollover to 1 instead of 0 if index
	 * is at the max.  0 is an invalid SMID, so we call the first index 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired.  That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			mutex_exit(&ptgt->m_tgt_intr_mutex);

			/*
			 * Give the slot back and requeue at the head of
			 * the waitq; m_mutex is already held on this path.
			 */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
			return (DDI_FAILURE);
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		return (DDI_FAILURE);
	}
	mutex_exit(&ptgt->m_tgt_intr_mutex);

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			/*
			 * NOTE(review): m_t_throttle is written here without
			 * m_tgt_intr_mutex held (dropped above) — confirm
			 * this store is safe against concurrent throttle
			 * updates.
			 */
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	/* Locate and build this SMID's request frame. */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;

	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	/* Copy the CDB into the request frame. */
	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* No data phase: single empty SGE terminating the list. */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_ext_arqcookie.dmac_address);
	} else {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_arqcookie.dmac_address);
	}

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=0x%p, with cmd=0x%p",
	    (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));

	/* Flush the request frame to the device before posting. */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	request_desc_high = ptgt->m_devhdl << 16;

	/* m_mutex is held, so slot linking and posting are atomic here. */
	mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);

	/*
	 * Start timeout.
	 */
	mutex_enter(&ptgt->m_tgt_intr_mutex);
#ifdef MPTSAS_TEST
	/*
	 * Temporarily set timebase = 0;  needed for
	 * timeout torture test.
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 0;
	}
#endif
	/* Keep timebase at the max pkt_time seen; count duplicates. */
	n = pkt->pkt_time - ptgt->m_timebase;

	if (n == 0) {
		(ptgt->m_dups)++;
		ptgt->m_timeout = ptgt->m_timebase;
	} else if (n > 0) {
		ptgt->m_timeout =
		    ptgt->m_timebase = pkt->pkt_time;
		ptgt->m_dups = 1;
	} else if (n < 0) {
		ptgt->m_timeout = ptgt->m_timebase;
	}
#ifdef MPTSAS_TEST
	/*
	 * Set back to a number higher than
	 * mptsas_scsi_watchdog_tick
	 * so timeouts will happen in mptsas_watchsubr
	 */
	if (mptsas_test_timeouts) {
		ptgt->m_timebase = 60;
	}
#endif
	mutex_exit(&ptgt->m_tgt_intr_mutex);

	/* FMA: verify the DMA/access handles used above are still good. */
	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
8607 8607
8608 8608 /*
8609 8609 * Select a helper thread to handle current doneq
8610 8610 */
8611 8611 static void
8612 8612 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8613 8613 {
8614 8614 uint64_t t, i;
8615 8615 uint32_t min = 0xffffffff;
8616 8616 mptsas_doneq_thread_list_t *item;
8617 8617
8618 8618 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8619 8619 item = &mpt->m_doneq_thread_id[i];
8620 8620 /*
8621 8621 * If the completed command on help thread[i] less than
8622 8622 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8623 8623 * pick a thread which has least completed command.
8624 8624 */
8625 8625
8626 8626 mutex_enter(&item->mutex);
8627 8627 if (item->len < mpt->m_doneq_thread_threshold) {
8628 8628 t = i;
8629 8629 mutex_exit(&item->mutex);
8630 8630 break;
8631 8631 }
8632 8632 if (item->len < min) {
8633 8633 min = item->len;
8634 8634 t = i;
8635 8635 }
8636 8636 mutex_exit(&item->mutex);
8637 8637 }
8638 8638 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8639 8639 mptsas_doneq_mv(mpt, t);
8640 8640 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8641 8641 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8642 8642 }
8643 8643
8644 8644 /*
8645 8645 * move the current global doneq to the doneq of thread[t]
8646 8646 */
8647 8647 static void
8648 8648 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8649 8649 {
8650 8650 mptsas_cmd_t *cmd;
8651 8651 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8652 8652
8653 8653 ASSERT(mutex_owned(&item->mutex));
8654 8654 mutex_enter(&mpt->m_intr_mutex);
8655 8655 while ((cmd = mpt->m_doneq) != NULL) {
8656 8656 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8657 8657 mpt->m_donetail = &mpt->m_doneq;
8658 8658 }
8659 8659 cmd->cmd_linkp = NULL;
8660 8660 *item->donetail = cmd;
8661 8661 item->donetail = &cmd->cmd_linkp;
8662 8662 mpt->m_doneq_len--;
8663 8663 item->len++;
8664 8664 }
8665 8665 mutex_exit(&mpt->m_intr_mutex);
8666 8666 }
8667 8667
/*
 * FMA sweep for a completed command: verify every access and DMA handle
 * the command may have touched.  On any failure, report a service impact
 * and fail the packet with CMD_TRAN_ERR so the target driver can retry.
 */
void
mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	/* Check all acc and dma handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		/* Only the config-space handle's error state is cleared. */
		ddi_fm_acc_err_clear(mpt->m_config_handle,
		    DDI_FME_VER0);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Adapter-wide DMA handles. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Per-command data DMA handle, if the cmd had a data phase. */
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Extra request frames allocated for large SGLs. */
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Auto-request-sense DMA handles (normal and extended). */
	if (cmd->cmd_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if (cmd->cmd_ext_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
}
8738 8738
8739 8739 /*
8740 8740 * mptsas_doneq_add0 is similar to mptsas_doneq_add except that it is called
8741 8741 * where m_intr_mutex has already been held.
8742 8742 */
8743 8743 static inline void
8744 8744 mptsas_doneq_add0(mptsas_t *mpt, mptsas_cmd_t *cmd)
8745 8745 {
8746 8746 struct scsi_pkt *pkt = CMD2PKT(cmd);
8747 8747
8748 8748 NDBG31(("mptsas_doneq_add0: cmd=0x%p", (void *)cmd));
8749 8749
8750 8750 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8751 8751 cmd->cmd_linkp = NULL;
8752 8752 cmd->cmd_flags |= CFLAG_FINISHED;
8753 8753 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8754 8754
8755 8755 /*
8756 8756 * only add scsi pkts that have completion routines to
8757 8757 * the doneq. no intr cmds do not have callbacks.
8758 8758 */
8759 8759 if (pkt && (pkt->pkt_comp)) {
8760 8760 *mpt->m_donetail = cmd;
8761 8761 mpt->m_donetail = &cmd->cmd_linkp;
8762 8762 mpt->m_doneq_len++;
8763 8763 }
8764 8764 }
8765 8765
8766 8766 /*
8767 8767 * These routines manipulate the queue of commands that
8768 8768 * are waiting for their completion routines to be called.
8769 8769 * The queue is usually in FIFO order but on an MP system
8770 8770 * it's possible for the completion routines to get out
8771 8771 * of order. If that's a problem you need to add a global
8772 8772 * mutex around the code that calls the completion routine
8773 8773 * in the interrupt handler.
8774 8774 */
8775 8775 static void
8776 8776 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8777 8777 {
8778 8778 ASSERT(mutex_owned(&mpt->m_mutex));
8779 8779
8780 8780 mptsas_fma_check(mpt, cmd);
8781 8781
8782 8782 mutex_enter(&mpt->m_intr_mutex);
8783 8783 mptsas_doneq_add0(mpt, cmd);
8784 8784 mutex_exit(&mpt->m_intr_mutex);
8785 8785 }
8786 8786
8787 8787 static mptsas_cmd_t *
8788 8788 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8789 8789 {
8790 8790 mptsas_cmd_t *cmd;
8791 8791 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8792 8792
8793 8793 /* pop one off the done queue */
8794 8794 if ((cmd = item->doneq) != NULL) {
8795 8795 /* if the queue is now empty fix the tail pointer */
8796 8796 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8797 8797 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8798 8798 item->donetail = &item->doneq;
8799 8799 }
8800 8800 cmd->cmd_linkp = NULL;
8801 8801 item->len--;
8802 8802 }
8803 8803 return (cmd);
8804 8804 }
8805 8805
/*
 * Drain the global doneq, running each completed command's scsi_pkt
 * completion routine.  m_in_callback guards against recursive draining.
 *
 * Locking is asymmetric by caller context: when called from the ISR
 * (T_INTR_THREAD set) m_mutex is NOT held and is left alone; from any
 * other context m_mutex IS held and is dropped around the callbacks so
 * they may re-enter the driver.
 */
static void
mptsas_doneq_empty(mptsas_t *mpt)
{
	mutex_enter(&mpt->m_intr_mutex);
	if (mpt->m_doneq && !mpt->m_in_callback) {
		mptsas_cmd_t *cmd, *next;
		struct scsi_pkt *pkt;

		mpt->m_in_callback = 1;
		/* Detach the whole queue so the lock can be dropped. */
		cmd = mpt->m_doneq;
		mpt->m_doneq = NULL;
		mpt->m_donetail = &mpt->m_doneq;
		mpt->m_doneq_len = 0;

		mutex_exit(&mpt->m_intr_mutex);

		/*
		 * ONLY in ISR, is it called without m_mutex held, otherwise,
		 * it is always called with m_mutex held.
		 */
		if ((curthread->t_flag & T_INTR_THREAD) == 0)
			mutex_exit(&mpt->m_mutex);
		/*
		 * run the completion routines of all the
		 * completed commands
		 */
		while (cmd != NULL) {
			next = cmd->cmd_linkp;
			cmd->cmd_linkp = NULL;
			/* run this command's completion routine */
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
			mptsas_pkt_comp(pkt, cmd);
			cmd = next;
		}
		if ((curthread->t_flag & T_INTR_THREAD) == 0)
			mutex_enter(&mpt->m_mutex);
		mpt->m_in_callback = 0;
		return;
	}
	mutex_exit(&mpt->m_intr_mutex);
}
8848 8848
8849 8849 /*
8850 8850 * These routines manipulate the target's queue of pending requests
8851 8851 */
8852 8852 void
8853 8853 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8854 8854 {
8855 8855 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8856 8856 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8857 8857 cmd->cmd_queued = TRUE;
8858 8858 if (ptgt)
8859 8859 ptgt->m_t_nwait++;
8860 8860 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8861 8861 mutex_enter(&mpt->m_intr_mutex);
8862 8862 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8863 8863 mpt->m_waitqtail = &cmd->cmd_linkp;
8864 8864 }
8865 8865 mpt->m_waitq = cmd;
8866 8866 mutex_exit(&mpt->m_intr_mutex);
8867 8867 } else {
8868 8868 cmd->cmd_linkp = NULL;
8869 8869 *(mpt->m_waitqtail) = cmd;
8870 8870 mpt->m_waitqtail = &cmd->cmd_linkp;
8871 8871 }
8872 8872 }
8873 8873
8874 8874 static mptsas_cmd_t *
8875 8875 mptsas_waitq_rm(mptsas_t *mpt)
8876 8876 {
8877 8877 mptsas_cmd_t *cmd;
8878 8878 mptsas_target_t *ptgt;
8879 8879 NDBG7(("mptsas_waitq_rm"));
8880 8880
8881 8881 mutex_enter(&mpt->m_intr_mutex);
8882 8882 MPTSAS_WAITQ_RM(mpt, cmd);
8883 8883 mutex_exit(&mpt->m_intr_mutex);
8884 8884
8885 8885 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8886 8886 if (cmd) {
8887 8887 ptgt = cmd->cmd_tgt_addr;
8888 8888 if (ptgt) {
8889 8889 ptgt->m_t_nwait--;
8890 8890 ASSERT(ptgt->m_t_nwait >= 0);
8891 8891 }
8892 8892 }
8893 8893 return (cmd);
8894 8894 }
8895 8895
8896 8896 /*
8897 8897 * remove specified cmd from the middle of the wait queue.
8898 8898 */
8899 8899 static void
8900 8900 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8901 8901 {
8902 8902 mptsas_cmd_t *prevp = mpt->m_waitq;
8903 8903 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8904 8904
8905 8905 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8906 8906 (void *)mpt, (void *)cmd));
8907 8907 if (ptgt) {
8908 8908 ptgt->m_t_nwait--;
8909 8909 ASSERT(ptgt->m_t_nwait >= 0);
8910 8910 }
8911 8911
8912 8912 if (prevp == cmd) {
8913 8913 mutex_enter(&mpt->m_intr_mutex);
8914 8914 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8915 8915 mpt->m_waitqtail = &mpt->m_waitq;
8916 8916 mutex_exit(&mpt->m_intr_mutex);
8917 8917
8918 8918 cmd->cmd_linkp = NULL;
8919 8919 cmd->cmd_queued = FALSE;
8920 8920 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8921 8921 (void *)mpt, (void *)cmd));
8922 8922 return;
8923 8923 }
8924 8924
8925 8925 while (prevp != NULL) {
8926 8926 if (prevp->cmd_linkp == cmd) {
8927 8927 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8928 8928 mpt->m_waitqtail = &prevp->cmd_linkp;
8929 8929
8930 8930 cmd->cmd_linkp = NULL;
8931 8931 cmd->cmd_queued = FALSE;
8932 8932 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8933 8933 (void *)mpt, (void *)cmd));
8934 8934 return;
8935 8935 }
8936 8936 prevp = prevp->cmd_linkp;
8937 8937 }
8938 8938 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8939 8939 }
8940 8940
8941 8941 /*
8942 8942 * device and bus reset handling
8943 8943 *
8944 8944 * Notes:
8945 8945 * - RESET_ALL: reset the controller
8946 8946 * - RESET_TARGET: reset the target specified in scsi_address
8947 8947 */
8948 8948 static int
8949 8949 mptsas_scsi_reset(struct scsi_address *ap, int level)
8950 8950 {
8951 8951 mptsas_t *mpt = ADDR2MPT(ap);
8952 8952 int rval;
8953 8953 mptsas_tgt_private_t *tgt_private;
8954 8954 mptsas_target_t *ptgt = NULL;
8955 8955
8956 8956 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8957 8957 ptgt = tgt_private->t_private;
8958 8958 if (ptgt == NULL) {
8959 8959 return (FALSE);
8960 8960 }
8961 8961 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
8962 8962 level));
8963 8963
8964 8964 mutex_enter(&mpt->m_mutex);
8965 8965 /*
8966 8966 * if we are not in panic set up a reset delay for this target
8967 8967 */
8968 8968 if (!ddi_in_panic()) {
8969 8969 mptsas_setup_bus_reset_delay(mpt);
8970 8970 } else {
8971 8971 drv_usecwait(mpt->m_scsi_reset_delay * 1000);
8972 8972 }
8973 8973 rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
8974 8974 mutex_exit(&mpt->m_mutex);
8975 8975
8976 8976 /*
8977 8977 * The transport layer expect to only see TRUE and
8978 8978 * FALSE. Therefore, we will adjust the return value
8979 8979 * if mptsas_do_scsi_reset returns FAILED.
8980 8980 */
8981 8981 if (rval == FAILED)
8982 8982 rval = FALSE;
8983 8983 return (rval);
8984 8984 }
8985 8985
8986 8986 static int
8987 8987 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8988 8988 {
8989 8989 int rval = FALSE;
8990 8990 uint8_t config, disk;
8991 8991 mptsas_slots_t *slots = mpt->m_active;
8992 8992
8993 8993 ASSERT(mutex_owned(&mpt->m_mutex));
8994 8994
8995 8995 if (mptsas_debug_resets) {
8996 8996 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8997 8997 devhdl);
8998 8998 }
8999 8999
9000 9000 /*
9001 9001 * Issue a Target Reset message to the target specified but not to a
9002 9002 * disk making up a raid volume. Just look through the RAID config
9003 9003 * Phys Disk list of DevHandles. If the target's DevHandle is in this
9004 9004 * list, then don't reset this target.
9005 9005 */
9006 9006 for (config = 0; config < slots->m_num_raid_configs; config++) {
9007 9007 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
9008 9008 if (devhdl == slots->m_raidconfig[config].
9009 9009 m_physdisk_devhdl[disk]) {
9010 9010 return (TRUE);
9011 9011 }
9012 9012 }
9013 9013 }
9014 9014
9015 9015 rval = mptsas_ioc_task_management(mpt,
9016 9016 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
9017 9017
9018 9018 mptsas_doneq_empty(mpt);
9019 9019 return (rval);
9020 9020 }
9021 9021
/*
 * tran_reset_notify(9E) entry point: register or unregister a reset
 * notification callback.  Defers entirely to the generic SCSA helper,
 * which keeps the listener list in m_reset_notify_listf under m_mutex.
 */
static int
mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));

	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &mpt->m_mutex, &mpt->m_reset_notify_listf));
}
9033 9033
9034 9034 static int
9035 9035 mptsas_get_name(struct scsi_device *sd, char *name, int len)
9036 9036 {
9037 9037 dev_info_t *lun_dip = NULL;
9038 9038
9039 9039 ASSERT(sd != NULL);
9040 9040 ASSERT(name != NULL);
9041 9041 lun_dip = sd->sd_dev;
9042 9042 ASSERT(lun_dip != NULL);
9043 9043
9044 9044 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
9045 9045 return (1);
9046 9046 } else {
9047 9047 return (0);
9048 9048 }
9049 9049 }
9050 9050
/*
 * tran_get_bus_addr(9E) entry point: the bus address for this HBA is the
 * same string as the device name, so just reuse mptsas_get_name().
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
9056 9056
9057 9057 void
9058 9058 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
9059 9059 {
9060 9060
9061 9061 NDBG25(("mptsas_set_throttle: throttle=%x", what));
9062 9062
9063 9063 /*
9064 9064 * if the bus is draining/quiesced, no changes to the throttles
9065 9065 * are allowed. Not allowing change of throttles during draining
9066 9066 * limits error recovery but will reduce draining time
9067 9067 *
9068 9068 * all throttles should have been set to HOLD_THROTTLE
9069 9069 */
9070 9070 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
9071 9071 return;
9072 9072 }
9073 9073
9074 9074 if (what == HOLD_THROTTLE) {
9075 9075 ptgt->m_t_throttle = HOLD_THROTTLE;
9076 9076 } else if (ptgt->m_reset_delay == 0) {
9077 9077 ptgt->m_t_throttle = what;
9078 9078 }
9079 9079 }
9080 9080
/*
 * Clean up from a device reset.
 * For the case of target reset, this function clears the waitq of all
 * commands for a particular target. For the case of abort task set, this
 * function clears the waitq of all commands for a particular target/lun.
 *
 * Matching commands found in the active slot table are completed with
 * CMD_RESET/STAT_DEV_RESET (CMD_ABORTED/STAT_ABORTED for an abort task
 * set); the same reason/stat pair is then applied to matching commands
 * still sitting on the wait queue.
 */
static void
mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd, *next_cmd;
	int		slot;
	uchar_t		reason;
	uint_t		stat;

	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));

	/*
	 * Make sure the I/O Controller has flushed all cmds
	 * that are associated with this target for a target reset
	 * and target/lun for abort task set.
	 * Account for TM requests, which use the last SMID.
	 */
	mutex_enter(&mpt->m_intr_mutex);
	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL) {
			continue;
		}
		reason = CMD_RESET;
		stat = STAT_DEV_RESET;
		switch (tasktype) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			/* Target reset: match on target only. */
			if (Tgt(cmd) == target) {
				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd0(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
				mptsas_doneq_add0(mpt, cmd);
			}
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
			reason = CMD_ABORTED;
			stat = STAT_ABORTED;
			/*FALLTHROUGH*/
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			/* Abort task set / LUN reset: match target and lun. */
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd0(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason,
				    stat);
				mptsas_doneq_add0(mpt, cmd);
			}
			break;
		default:
			break;
		}
	}
	mutex_exit(&mpt->m_intr_mutex);

	/*
	 * Flush the waitq of this target's cmds
	 */
	cmd = mpt->m_waitq;

	reason = CMD_RESET;
	stat = STAT_DEV_RESET;

	switch (tasktype) {
	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		while (cmd != NULL) {
			/* Save the link before the cmd is unlinked. */
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		reason = CMD_ABORTED;
		stat = STAT_ABORTED;
		/*FALLTHROUGH*/
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		break;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
		    tasktype);
		break;
	}
}
9189 9189
/*
 * Clean up hba state, abort all outstanding command and commands in waitq
 * reset timeout of all targets.
 *
 * Every command still in the active slot table or on the wait queue is
 * failed with CMD_RESET/STAT_BUS_RESET.  Internal IOC commands
 * (passthru/config/FW-diag) have waiters blocked on condition variables,
 * so those are marked CFLAG_FINISHED and their waiters are woken instead
 * of going through the done queue.
 */
static void
mptsas_flush_hba(mptsas_t *mpt)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd;
	int		slot;

	NDBG25(("mptsas_flush_hba"));

	/*
	 * The I/O Controller should have already sent back
	 * all commands via the scsi I/O reply frame. Make
	 * sure all commands have been flushed.
	 * Account for TM request, which use the last SMID.
	 */
	mutex_enter(&mpt->m_intr_mutex);
	for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL) {
			continue;
		}

		if (cmd->cmd_flags & CFLAG_CMDIOC) {
			/*
			 * Need to make sure to tell everyone that might be
			 * waiting on this command that it's going to fail. If
			 * we get here, this command will never timeout because
			 * the active command table is going to be re-allocated,
			 * so there will be nothing to check against a time out.
			 * Instead, mark the command as failed due to reset.
			 */
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				cmd->cmd_flags |= CFLAG_FINISHED;
				cv_broadcast(&mpt->m_passthru_cv);
				cv_broadcast(&mpt->m_config_cv);
				cv_broadcast(&mpt->m_fw_diag_cv);
			}
			continue;
		}

		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
		    slot));
		mptsas_dump_cmd(mpt, cmd);

		mptsas_remove_cmd0(mpt, cmd);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add0(mpt, cmd);
	}
	mutex_exit(&mpt->m_intr_mutex);

	/*
	 * Flush the waitq.
	 */
	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			/* Internal command: wake its waiter directly. */
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
		} else {
			mptsas_doneq_add(mpt, cmd);
		}
	}
}
9264 9264
9265 9265 /*
9266 9266 * set pkt_reason and OR in pkt_statistics flag
9267 9267 */
9268 9268 static void
9269 9269 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9270 9270 uint_t stat)
9271 9271 {
9272 9272 #ifndef __lock_lint
9273 9273 _NOTE(ARGUNUSED(mpt))
9274 9274 #endif
9275 9275
9276 9276 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9277 9277 (void *)cmd, reason, stat));
9278 9278
9279 9279 if (cmd) {
9280 9280 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9281 9281 cmd->cmd_pkt->pkt_reason = reason;
9282 9282 }
9283 9283 cmd->cmd_pkt->pkt_statistics |= stat;
9284 9284 }
9285 9285 }
9286 9286
9287 9287 static void
9288 9288 mptsas_start_watch_reset_delay()
9289 9289 {
9290 9290 NDBG22(("mptsas_start_watch_reset_delay"));
9291 9291
9292 9292 mutex_enter(&mptsas_global_mutex);
9293 9293 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
9294 9294 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
9295 9295 drv_usectohz((clock_t)
9296 9296 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
9297 9297 ASSERT(mptsas_reset_watch != NULL);
9298 9298 }
9299 9299 mutex_exit(&mptsas_global_mutex);
9300 9300 }
9301 9301
9302 9302 static void
9303 9303 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
9304 9304 {
9305 9305 mptsas_target_t *ptgt = NULL;
9306 9306
9307 9307 NDBG22(("mptsas_setup_bus_reset_delay"));
9308 9308 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9309 9309 MPTSAS_HASH_FIRST);
9310 9310 while (ptgt != NULL) {
9311 9311 mutex_enter(&ptgt->m_tgt_intr_mutex);
9312 9312 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9313 9313 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
9314 9314 mutex_exit(&ptgt->m_tgt_intr_mutex);
9315 9315
9316 9316 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9317 9317 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9318 9318 }
9319 9319
9320 9320 mptsas_start_watch_reset_delay();
9321 9321 }
9322 9322
9323 9323 /*
9324 9324 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9325 9325 * mpt instance for active reset delays
9326 9326 */
9327 9327 static void
9328 9328 mptsas_watch_reset_delay(void *arg)
9329 9329 {
9330 9330 #ifndef __lock_lint
9331 9331 _NOTE(ARGUNUSED(arg))
9332 9332 #endif
9333 9333
9334 9334 mptsas_t *mpt;
9335 9335 int not_done = 0;
9336 9336
9337 9337 NDBG22(("mptsas_watch_reset_delay"));
9338 9338
9339 9339 mutex_enter(&mptsas_global_mutex);
9340 9340 mptsas_reset_watch = 0;
9341 9341 mutex_exit(&mptsas_global_mutex);
9342 9342 rw_enter(&mptsas_global_rwlock, RW_READER);
9343 9343 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9344 9344 if (mpt->m_tran == 0) {
9345 9345 continue;
9346 9346 }
9347 9347 mutex_enter(&mpt->m_mutex);
9348 9348 not_done += mptsas_watch_reset_delay_subr(mpt);
9349 9349 mutex_exit(&mpt->m_mutex);
9350 9350 }
9351 9351 rw_exit(&mptsas_global_rwlock);
9352 9352
9353 9353 if (not_done) {
9354 9354 mptsas_start_watch_reset_delay();
9355 9355 }
9356 9356 }
9357 9357
9358 9358 static int
9359 9359 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
9360 9360 {
9361 9361 int done = 0;
9362 9362 int restart = 0;
9363 9363 mptsas_target_t *ptgt = NULL;
9364 9364
9365 9365 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
9366 9366
9367 9367 ASSERT(mutex_owned(&mpt->m_mutex));
9368 9368
9369 9369 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9370 9370 MPTSAS_HASH_FIRST);
9371 9371 while (ptgt != NULL) {
9372 9372 mutex_enter(&ptgt->m_tgt_intr_mutex);
9373 9373 if (ptgt->m_reset_delay != 0) {
9374 9374 ptgt->m_reset_delay -=
9375 9375 MPTSAS_WATCH_RESET_DELAY_TICK;
9376 9376 if (ptgt->m_reset_delay <= 0) {
9377 9377 ptgt->m_reset_delay = 0;
9378 9378 mptsas_set_throttle(mpt, ptgt,
9379 9379 MAX_THROTTLE);
9380 9380 restart++;
9381 9381 } else {
9382 9382 done = -1;
9383 9383 }
9384 9384 }
9385 9385 mutex_exit(&ptgt->m_tgt_intr_mutex);
9386 9386
9387 9387 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9388 9388 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9389 9389 }
9390 9390
9391 9391 if (restart > 0) {
9392 9392 mptsas_restart_hba(mpt);
9393 9393 }
9394 9394 return (done);
9395 9395 }
9396 9396
#ifdef MPTSAS_TEST
/*
 * Debug hook: force a SCSI target reset of the target selected via the
 * global mptsas_rtest tunable.  mptsas_do_scsi_reset() asserts m_mutex
 * is held, so callers must hold it.  On success, mptsas_rtest is reset
 * to -1 so the test fires only once.
 *
 * Fix: removed the local `ptgt`, which was declared but never used
 * (triggers unused-variable warnings/lint).
 */
static void
mptsas_test_reset(mptsas_t *mpt, int target)
{
	if (mptsas_rtest == target) {
		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
			mptsas_rtest = -1;
		}
		if (mptsas_rtest == -1) {
			NDBG22(("mptsas_test_reset success"));
		}
	}
}
#endif
9413 9413
9414 9414 /*
9415 9415 * abort handling:
9416 9416 *
9417 9417 * Notes:
9418 9418 * - if pkt is not NULL, abort just that command
9419 9419 * - if pkt is NULL, abort all outstanding commands for target
9420 9420 */
9421 9421 static int
9422 9422 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9423 9423 {
9424 9424 mptsas_t *mpt = ADDR2MPT(ap);
9425 9425 int rval;
9426 9426 mptsas_tgt_private_t *tgt_private;
9427 9427 int target, lun;
9428 9428
9429 9429 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9430 9430 tran_tgt_private;
9431 9431 ASSERT(tgt_private != NULL);
9432 9432 target = tgt_private->t_private->m_devhdl;
9433 9433 lun = tgt_private->t_lun;
9434 9434
9435 9435 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9436 9436
9437 9437 mutex_enter(&mpt->m_mutex);
9438 9438 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9439 9439 mutex_exit(&mpt->m_mutex);
9440 9440 return (rval);
9441 9441 }
9442 9442
/*
 * Abort a single packet (pkt != NULL) or the whole task set for
 * target/lun (pkt == NULL).  Returns TRUE/FALSE for the transport layer.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap. If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		/*
		 * Still on the wait queue: it never reached the hardware,
		 * so it can be completed locally without firmware help.
		 */
		if (sp->cmd_queued) {
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */
		mutex_enter(&mpt->m_intr_mutex);
		if (slots->m_slot[sp->cmd_slot] != NULL) {
			mutex_exit(&mpt->m_intr_mutex);
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
		mutex_exit(&mpt->m_intr_mutex);
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Drain anything the abort pushed onto the done queue. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
9524 9524
9525 9525 /*
9526 9526 * capability handling:
9527 9527 * (*tran_getcap). Get the capability named, and return its value.
9528 9528 */
9529 9529 static int
9530 9530 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9531 9531 {
9532 9532 mptsas_t *mpt = ADDR2MPT(ap);
9533 9533 int ckey;
9534 9534 int rval = FALSE;
9535 9535
9536 9536 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9537 9537 ap->a_target, cap, tgtonly));
9538 9538
9539 9539 mutex_enter(&mpt->m_mutex);
9540 9540
9541 9541 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9542 9542 mutex_exit(&mpt->m_mutex);
9543 9543 return (UNDEFINED);
9544 9544 }
9545 9545
9546 9546 switch (ckey) {
9547 9547 case SCSI_CAP_DMA_MAX:
9548 9548 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9549 9549 break;
9550 9550 case SCSI_CAP_ARQ:
9551 9551 rval = TRUE;
9552 9552 break;
9553 9553 case SCSI_CAP_MSG_OUT:
9554 9554 case SCSI_CAP_PARITY:
9555 9555 case SCSI_CAP_UNTAGGED_QING:
9556 9556 rval = TRUE;
9557 9557 break;
9558 9558 case SCSI_CAP_TAGGED_QING:
9559 9559 rval = TRUE;
9560 9560 break;
9561 9561 case SCSI_CAP_RESET_NOTIFICATION:
9562 9562 rval = TRUE;
9563 9563 break;
9564 9564 case SCSI_CAP_LINKED_CMDS:
9565 9565 rval = FALSE;
9566 9566 break;
9567 9567 case SCSI_CAP_QFULL_RETRIES:
9568 9568 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9569 9569 tran_tgt_private))->t_private->m_qfull_retries;
9570 9570 break;
9571 9571 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9572 9572 rval = drv_hztousec(((mptsas_tgt_private_t *)
9573 9573 (ap->a_hba_tran->tran_tgt_private))->
9574 9574 t_private->m_qfull_retry_interval) / 1000;
9575 9575 break;
9576 9576 case SCSI_CAP_CDB_LEN:
9577 9577 rval = CDB_GROUP4;
9578 9578 break;
9579 9579 case SCSI_CAP_INTERCONNECT_TYPE:
9580 9580 rval = INTERCONNECT_SAS;
9581 9581 break;
9582 9582 case SCSI_CAP_TRAN_LAYER_RETRIES:
9583 9583 if (mpt->m_ioc_capabilities &
9584 9584 MPI2_IOCFACTS_CAPABILITY_TLR)
9585 9585 rval = TRUE;
9586 9586 else
9587 9587 rval = FALSE;
9588 9588 break;
9589 9589 default:
9590 9590 rval = UNDEFINED;
9591 9591 break;
9592 9592 }
9593 9593
9594 9594 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9595 9595
9596 9596 mutex_exit(&mpt->m_mutex);
9597 9597 return (rval);
9598 9598 }
9599 9599
9600 9600 /*
9601 9601 * (*tran_setcap). Set the capability named to the value given.
9602 9602 */
9603 9603 static int
9604 9604 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9605 9605 {
9606 9606 mptsas_t *mpt = ADDR2MPT(ap);
9607 9607 int ckey;
9608 9608 int rval = FALSE;
9609 9609 mptsas_target_t *ptgt;
9610 9610
9611 9611 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9612 9612 ap->a_target, cap, value, tgtonly));
9613 9613
9614 9614 if (!tgtonly) {
9615 9615 return (rval);
9616 9616 }
9617 9617
9618 9618 mutex_enter(&mpt->m_mutex);
9619 9619
9620 9620 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9621 9621 mutex_exit(&mpt->m_mutex);
9622 9622 return (UNDEFINED);
9623 9623 }
9624 9624
9625 9625 switch (ckey) {
9626 9626 case SCSI_CAP_DMA_MAX:
9627 9627 case SCSI_CAP_MSG_OUT:
9628 9628 case SCSI_CAP_PARITY:
9629 9629 case SCSI_CAP_INITIATOR_ID:
9630 9630 case SCSI_CAP_LINKED_CMDS:
9631 9631 case SCSI_CAP_UNTAGGED_QING:
9632 9632 case SCSI_CAP_RESET_NOTIFICATION:
9633 9633 /*
9634 9634 * None of these are settable via
9635 9635 * the capability interface.
9636 9636 */
9637 9637 break;
9638 9638 case SCSI_CAP_ARQ:
9639 9639 /*
9640 9640 * We cannot turn off arq so return false if asked to
9641 9641 */
9642 9642 if (value) {
9643 9643 rval = TRUE;
9644 9644 } else {
9645 9645 rval = FALSE;
9646 9646 }
9647 9647 break;
9648 9648 case SCSI_CAP_TAGGED_QING:
9649 9649 ptgt = ((mptsas_tgt_private_t *)
9650 9650 (ap->a_hba_tran->tran_tgt_private))->t_private;
9651 9651 mutex_enter(&ptgt->m_tgt_intr_mutex);
9652 9652 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9653 9653 mutex_exit(&ptgt->m_tgt_intr_mutex);
9654 9654 rval = TRUE;
9655 9655 break;
9656 9656 case SCSI_CAP_QFULL_RETRIES:
9657 9657 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9658 9658 t_private->m_qfull_retries = (uchar_t)value;
9659 9659 rval = TRUE;
9660 9660 break;
9661 9661 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9662 9662 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9663 9663 t_private->m_qfull_retry_interval =
9664 9664 drv_usectohz(value * 1000);
9665 9665 rval = TRUE;
9666 9666 break;
9667 9667 default:
9668 9668 rval = UNDEFINED;
9669 9669 break;
9670 9670 }
9671 9671 mutex_exit(&mpt->m_mutex);
9672 9672 return (rval);
9673 9673 }
9674 9674
9675 9675 /*
9676 9676 * Utility routine for mptsas_ifsetcap/ifgetcap
9677 9677 */
9678 9678 /*ARGSUSED*/
9679 9679 static int
9680 9680 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9681 9681 {
9682 9682 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9683 9683
9684 9684 if (!cap)
9685 9685 return (FALSE);
9686 9686
9687 9687 *cidxp = scsi_hba_lookup_capstr(cap);
9688 9688 return (TRUE);
9689 9689 }
9690 9690
/*
 * (Re)allocate the active slot table and the per-CPU slot free-queue
 * pairs.  Fails (returns -1) if commands are still outstanding or the
 * allocation fails; returns 0 on success.  State carried in the old
 * table (target/SMP hash tables, RAID configs) is preserved.
 */
static int
mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
{
	mptsas_slots_t	*old_active = mpt->m_active;
	mptsas_slots_t	*new_active;
	size_t		size;
	int		rval = -1, nslot, i;
	mptsas_slot_free_e_t	*pe;

	/* Resizing with commands in flight would orphan their slots. */
	if (mptsas_outstanding_cmds_n(mpt)) {
		NDBG9(("cannot change size of active slots array"));
		return (rval);
	}

	size = MPTSAS_SLOTS_SIZE(mpt);
	new_active = kmem_zalloc(size, flag);
	if (new_active == NULL) {
		NDBG1(("new active alloc failed"));
		return (rval);
	}
	/*
	 * Since SMID 0 is reserved and the TM slot is reserved, the
	 * number of slots that can be used at any one time is
	 * m_max_requests - 2.
	 */
	new_active->m_n_slots = nslot = (mpt->m_max_requests - 2);
	new_active->m_size = size;
	new_active->m_tags = 1;

	/* Carry target/SMP/RAID state over before freeing the old table. */
	if (old_active) {
		new_active->m_tgttbl = old_active->m_tgttbl;
		new_active->m_smptbl = old_active->m_smptbl;
		new_active->m_num_raid_configs =
		    old_active->m_num_raid_configs;
		for (i = 0; i < new_active->m_num_raid_configs; i++) {
			new_active->m_raidconfig[i] =
			    old_active->m_raidconfig[i];
		}
		mptsas_free_active_slots(mpt);
	}

	/*
	 * One alloc/release free-queue pair per CPU; round the count up
	 * to a power of two when max_ncpus is not one already.
	 */
	if (max_ncpus & (max_ncpus - 1)) {
		mpt->m_slot_freeq_pair_n = (1 << highbit(max_ncpus));
	} else {
		mpt->m_slot_freeq_pair_n = max_ncpus;
	}
	mpt->m_slot_freeq_pairp = kmem_zalloc(
	    mpt->m_slot_freeq_pair_n *
	    sizeof (mptsas_slot_freeq_pair_t), KM_SLEEP);
	for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) {
		list_create(&mpt->m_slot_freeq_pairp[i].
		    m_slot_allocq.s.m_fq_list,
		    sizeof (mptsas_slot_free_e_t),
		    offsetof(mptsas_slot_free_e_t, node));
		list_create(&mpt->m_slot_freeq_pairp[i].
		    m_slot_releq.s.m_fq_list,
		    sizeof (mptsas_slot_free_e_t),
		    offsetof(mptsas_slot_free_e_t, node));
		mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n = 0;
		mpt->m_slot_freeq_pairp[i].m_slot_releq.s.m_fq_n = 0;
		mutex_init(&mpt->m_slot_freeq_pairp[i].
		    m_slot_allocq.s.m_fq_mutex, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(mpt->m_intr_pri));
		mutex_init(&mpt->m_slot_freeq_pairp[i].
		    m_slot_releq.s.m_fq_mutex, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(mpt->m_intr_pri));
	}
	pe = mpt->m_slot_free_ae = kmem_zalloc(nslot *
	    sizeof (mptsas_slot_free_e_t), KM_SLEEP);
	/*
	 * An array of Mpi2ReplyDescriptorsUnion_t is defined here.
	 * We are trying to eliminate the m_mutex in the context
	 * reply code path in the ISR. Since the read of the
	 * ReplyDescriptor and update/write of the ReplyIndex must
	 * be atomic (since the poll thread may also update them at
	 * the same time) so we first read out of the ReplyDescriptor
	 * into this array and update the ReplyIndex register with a
	 * separate mutex m_intr_mutex protected, and then release the
	 * mutex and process all of them. the length of the array is
	 * defined as max as 128(128*64=8k), which is
	 * assumed as the maxmium depth of the interrupt coalese.
	 */
	mpt->m_reply = kmem_zalloc(MPI_ADDRESS_COALSCE_MAX *
	    sizeof (Mpi2ReplyDescriptorsUnion_t), KM_SLEEP);
	/* Distribute the free-slot entries round-robin over the queues. */
	for (i = 0; i < nslot; i++, pe++) {
		pe->slot = i + 1; /* SMID 0 is reserved */
		pe->cpuid = i % mpt->m_slot_freeq_pair_n;
		list_insert_tail(&mpt->m_slot_freeq_pairp
		    [i % mpt->m_slot_freeq_pair_n]
		    .m_slot_allocq.s.m_fq_list, pe);
		mpt->m_slot_freeq_pairp[i % mpt->m_slot_freeq_pair_n]
		    .m_slot_allocq.s.m_fq_n++;
		mpt->m_slot_freeq_pairp[i % mpt->m_slot_freeq_pair_n]
		    .m_slot_allocq.s.m_fq_n_init++;
	}

	mpt->m_active = new_active;
	rval = 0;

	return (rval);
}
9792 9792
/*
 * Tear down the active slot table: drain and destroy the per-CPU slot
 * free queues, release the free-entry array and the reply-descriptor
 * staging buffer, then free the table itself and clear m_active.
 */
static void
mptsas_free_active_slots(mptsas_t *mpt)
{
	mptsas_slots_t	*active = mpt->m_active;
	size_t		size;
	mptsas_slot_free_e_t	*pe;
	int		i;

	if (active == NULL)
		return;

	if (mpt->m_slot_freeq_pairp) {
		for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) {
			/* Lists must be emptied before list_destroy(). */
			while ((pe = list_head(&mpt->m_slot_freeq_pairp
			    [i].m_slot_allocq.s.m_fq_list)) != NULL) {
				list_remove(&mpt->m_slot_freeq_pairp[i]
				    .m_slot_allocq.s.m_fq_list, pe);
			}
			list_destroy(&mpt->m_slot_freeq_pairp
			    [i].m_slot_allocq.s.m_fq_list);
			while ((pe = list_head(&mpt->m_slot_freeq_pairp
			    [i].m_slot_releq.s.m_fq_list)) != NULL) {
				list_remove(&mpt->m_slot_freeq_pairp[i]
				    .m_slot_releq.s.m_fq_list, pe);
			}
			list_destroy(&mpt->m_slot_freeq_pairp
			    [i].m_slot_releq.s.m_fq_list);
			mutex_destroy(&mpt->m_slot_freeq_pairp
			    [i].m_slot_allocq.s.m_fq_mutex);
			mutex_destroy(&mpt->m_slot_freeq_pairp
			    [i].m_slot_releq.s.m_fq_mutex);
		}
		kmem_free(mpt->m_slot_freeq_pairp, mpt->m_slot_freeq_pair_n *
		    sizeof (mptsas_slot_freeq_pair_t));
	}
	if (mpt->m_slot_free_ae)
		kmem_free(mpt->m_slot_free_ae, mpt->m_active->m_n_slots *
		    sizeof (mptsas_slot_free_e_t));

	if (mpt->m_reply)
		kmem_free(mpt->m_reply, MPI_ADDRESS_COALSCE_MAX *
		    sizeof (Mpi2ReplyDescriptorsUnion_t));

	size = active->m_size;
	kmem_free(active, size);
	mpt->m_active = NULL;
}
9840 9840
/*
 * Error logging, printing, and debug print routines.
 */
static char *mptsas_label = "mpt_sas";	/* message prefix for scsi_log() */
9845 9845
9846 9846 /*PRINTFLIKE3*/
9847 9847 void
9848 9848 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
9849 9849 {
9850 9850 dev_info_t *dev;
9851 9851 va_list ap;
9852 9852
9853 9853 if (mpt) {
9854 9854 dev = mpt->m_dip;
9855 9855 } else {
9856 9856 dev = 0;
9857 9857 }
9858 9858
9859 9859 mutex_enter(&mptsas_log_mutex);
9860 9860
9861 9861 va_start(ap, fmt);
9862 9862 (void) vsprintf(mptsas_log_buf, fmt, ap);
9863 9863 va_end(ap);
9864 9864
9865 9865 if (level == CE_CONT) {
9866 9866 scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
9867 9867 } else {
9868 9868 scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
9869 9869 }
9870 9870
9871 9871 mutex_exit(&mptsas_log_mutex);
9872 9872 }
9873 9873
#ifdef MPTSAS_DEBUG
/*
 * Debug-build printf.  Formats into the shared mptsas_log_buf (serialized
 * by mptsas_log_mutex) and emits the result via prom_printf() or
 * scsi_log(), depending on the PROM_PRINTF build option.
 */
/*PRINTFLIKE1*/
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t *dev = 0;
	va_list ap;

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
#endif
9896 9896
9897 9897 /*
9898 9898 * timeout handling
9899 9899 */
9900 9900 static void
9901 9901 mptsas_watch(void *arg)
9902 9902 {
9903 9903 #ifndef __lock_lint
9904 9904 _NOTE(ARGUNUSED(arg))
9905 9905 #endif
9906 9906
9907 9907 mptsas_t *mpt;
9908 9908 uint32_t doorbell;
9909 9909
9910 9910 NDBG30(("mptsas_watch"));
9911 9911
9912 9912 rw_enter(&mptsas_global_rwlock, RW_READER);
9913 9913 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9914 9914
9915 9915 mutex_enter(&mpt->m_mutex);
9916 9916
9917 9917 /* Skip device if not powered on */
9918 9918 if (mpt->m_options & MPTSAS_OPT_PM) {
9919 9919 if (mpt->m_power_level == PM_LEVEL_D0) {
9920 9920 (void) pm_busy_component(mpt->m_dip, 0);
9921 9921 mpt->m_busy = 1;
9922 9922 } else {
9923 9923 mutex_exit(&mpt->m_mutex);
9924 9924 continue;
9925 9925 }
9926 9926 }
9927 9927
9928 9928 /*
9929 9929 * Check if controller is in a FAULT state. If so, reset it.
9930 9930 */
9931 9931 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9932 9932 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9933 9933 doorbell &= MPI2_DOORBELL_DATA_MASK;
9934 9934 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9935 9935 "code: %04x", doorbell);
9936 9936 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9937 9937 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9938 9938 mptsas_log(mpt, CE_WARN, "Reset failed"
9939 9939 "after fault was detected");
9940 9940 }
9941 9941 }
9942 9942
9943 9943 /*
9944 9944 * For now, always call mptsas_watchsubr.
9945 9945 */
9946 9946 mptsas_watchsubr(mpt);
9947 9947
9948 9948 if (mpt->m_options & MPTSAS_OPT_PM) {
9949 9949 mpt->m_busy = 0;
9950 9950 (void) pm_idle_component(mpt->m_dip, 0);
9951 9951 }
9952 9952
9953 9953 mutex_exit(&mpt->m_mutex);
9954 9954 }
9955 9955 rw_exit(&mptsas_global_rwlock);
9956 9956
9957 9957 mutex_enter(&mptsas_global_mutex);
9958 9958 if (mptsas_timeouts_enabled)
9959 9959 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9960 9960 mutex_exit(&mptsas_global_mutex);
9961 9961 }
9962 9962
/*
 * Per-instance watchdog work, called from mptsas_watch() with the
 * instance's m_mutex held.  Two passes:
 *
 *  1. Under m_intr_mutex, walk every active slot (including the final
 *     slot reserved for task-management requests) and decrement each
 *     command's remaining time by one watchdog tick.  An expired normal
 *     command drains its target's throttle so outstanding I/O can
 *     complete; an expired passthrough/config/FW-diag command is marked
 *     FINISHED|TIMEOUT and all three waiter CVs are broadcast.
 *
 *  2. Walk the target hash table.  For each target: restore full
 *     throttle once a qfull-induced drain has caught up, skip targets
 *     still inside their timebase grace period, escalate an expired
 *     per-target timeout to mptsas_cmd_timeout(), and pre-emptively
 *     drain the throttle when a timeout is one tick away.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	mutex_enter(&mpt->m_intr_mutex);
	for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			/* Normal (non-IOC-internal) commands. */
			if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					ptgt = cmd->cmd_tgt_addr;
					mutex_enter(&ptgt->m_tgt_intr_mutex);
					mptsas_set_throttle(mpt, ptgt,
					    DRAIN_THROTTLE);
					mutex_exit(&ptgt->m_tgt_intr_mutex);
				}
			}
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					/*
					 * Wake every waiter class; each one
					 * rechecks its own command's flags.
					 */
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}
	mutex_exit(&mpt->m_intr_mutex);

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/*
		 * In order to avoid using m_mutex in the key code path in ISR,
		 * separate mutexs are introduced to protect those elements
		 * shown in ISR.
		 */
		mutex_enter(&ptgt->m_tgt_intr_mutex);

		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		if ((ptgt->m_t_ncmds > 0) &&
		    (ptgt->m_timebase)) {

			if (ptgt->m_timebase <=
			    mptsas_scsi_watchdog_tick) {
				/*
				 * Target is still within its initial grace
				 * period; extend it and move on.
				 */
				ptgt->m_timebase +=
				    mptsas_scsi_watchdog_tick;
				mutex_exit(&ptgt->m_tgt_intr_mutex);
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			ptgt->m_timeout -= mptsas_scsi_watchdog_tick;

			if (ptgt->m_timeout < 0) {
				/*
				 * Target timeout expired; drop the intr
				 * mutex before recovery, which needs m_mutex
				 * level operations.
				 */
				mutex_exit(&ptgt->m_tgt_intr_mutex);
				mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			if ((ptgt->m_timeout) <=
			    mptsas_scsi_watchdog_tick) {
				NDBG23(("pending timeout"));
				mptsas_set_throttle(mpt, ptgt,
				    DRAIN_THROTTLE);
			}
		}
		mutex_exit(&ptgt->m_tgt_intr_mutex);
		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
}
10076 10076
10077 10077 /*
10078 10078 * timeout recovery
10079 10079 */
10080 10080 static void
10081 10081 mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
10082 10082 {
10083 10083
10084 10084 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
10085 10085 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
10086 10086 "Target %d", devhdl);
10087 10087
10088 10088 /*
10089 10089 * If the current target is not the target passed in,
10090 10090 * try to reset that target.
10091 10091 */
10092 10092 NDBG29(("mptsas_cmd_timeout: device reset"));
10093 10093 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
10094 10094 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
10095 10095 "recovery failed!", devhdl);
10096 10096 }
10097 10097 }
10098 10098
10099 10099 /*
10100 10100 * Device / Hotplug control
10101 10101 */
10102 10102 static int
10103 10103 mptsas_scsi_quiesce(dev_info_t *dip)
10104 10104 {
10105 10105 mptsas_t *mpt;
10106 10106 scsi_hba_tran_t *tran;
10107 10107
10108 10108 tran = ddi_get_driver_private(dip);
10109 10109 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10110 10110 return (-1);
10111 10111
10112 10112 return (mptsas_quiesce_bus(mpt));
10113 10113 }
10114 10114
10115 10115 static int
10116 10116 mptsas_scsi_unquiesce(dev_info_t *dip)
10117 10117 {
10118 10118 mptsas_t *mpt;
10119 10119 scsi_hba_tran_t *tran;
10120 10120
10121 10121 tran = ddi_get_driver_private(dip);
10122 10122 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10123 10123 return (-1);
10124 10124
10125 10125 return (mptsas_unquiesce_bus(mpt));
10126 10126 }
10127 10127
/*
 * Quiesce all I/O on this instance.  Holds every target's throttle,
 * then, if commands are still outstanding, sets MPTSAS_SS_DRAINING and
 * sleeps on m_cv until either the drain completes (checked periodically
 * by mptsas_ncmds_checkdrain()) or a signal interrupts the wait.  On
 * interruption the throttles are restored, the HBA restarted, and any
 * pending drain-check timeout cancelled before returning -1.  Returns 0
 * once the bus is quiesced (MPTSAS_SS_QUIESCED set).
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
		mutex_exit(&ptgt->m_tgt_intr_mutex);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* If there are any outstanding commands in the queue */
	mutex_enter(&mpt->m_intr_mutex);
	if (mptsas_outstanding_cmds_n(mpt)) {
		mutex_exit(&mpt->m_intr_mutex);
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Undo the hold: restore full throttle everywhere. */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mutex_enter(&ptgt->m_tgt_intr_mutex);
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
				mutex_exit(&ptgt->m_tgt_intr_mutex);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}
			mptsas_restart_hba(mpt);
			/*
			 * Cancel the drain-check timeout if still pending;
			 * m_mutex must be dropped before untimeout(9F) to
			 * avoid deadlocking with a running callback.
			 */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	mutex_exit(&mpt->m_intr_mutex);
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
10195 10195
10196 10196 static int
10197 10197 mptsas_unquiesce_bus(mptsas_t *mpt)
10198 10198 {
10199 10199 mptsas_target_t *ptgt = NULL;
10200 10200
10201 10201 NDBG28(("mptsas_unquiesce_bus"));
10202 10202 mutex_enter(&mpt->m_mutex);
10203 10203 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10204 10204 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
10205 10205 MPTSAS_HASH_FIRST);
10206 10206 while (ptgt != NULL) {
10207 10207 mutex_enter(&ptgt->m_tgt_intr_mutex);
10208 10208 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10209 10209 mutex_exit(&ptgt->m_tgt_intr_mutex);
10210 10210
10211 10211 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10212 10212 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
10213 10213 }
10214 10214 mptsas_restart_hba(mpt);
10215 10215 mutex_exit(&mpt->m_mutex);
10216 10216 return (0);
10217 10217 }
10218 10218
/*
 * timeout(9F) callback armed by mptsas_quiesce_bus().  If the instance
 * is still draining: when commands remain outstanding, re-hold every
 * target's throttle (a bus reset may have restored it) and rearm this
 * check; once the queue is empty, signal m_cv to wake the quiescing
 * thread.  Does nothing if MPTSAS_SS_DRAINING has already been cleared.
 */
static void
mptsas_ncmds_checkdrain(void *arg)
{
	mptsas_t	*mpt = arg;
	mptsas_target_t	*ptgt = NULL;

	mutex_enter(&mpt->m_mutex);
	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
		/* This callback has fired; clear the pending timeout id. */
		mpt->m_quiesce_timeid = 0;
		mutex_enter(&mpt->m_intr_mutex);
		if (mptsas_outstanding_cmds_n(mpt)) {
			mutex_exit(&mpt->m_intr_mutex);
			/*
			 * The throttle may have been reset because
			 * of a SCSI bus reset
			 */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mutex_enter(&ptgt->m_tgt_intr_mutex);
				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
				mutex_exit(&ptgt->m_tgt_intr_mutex);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}

			/* Still draining; check again after another period. */
			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
			    drv_usectohz(1000000)));
		} else {
			mutex_exit(&mpt->m_intr_mutex);
			/* Command queue has been drained */
			cv_signal(&mpt->m_cv);
		}
	}
	mutex_exit(&mpt->m_mutex);
}
10257 10257
10258 10258 /*ARGSUSED*/
10259 10259 static void
10260 10260 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10261 10261 {
10262 10262 int i;
10263 10263 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10264 10264 char buf[128];
10265 10265
10266 10266 buf[0] = '\0';
10267 10267 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10268 10268 Tgt(cmd), Lun(cmd)));
10269 10269 (void) sprintf(&buf[0], "\tcdb=[");
10270 10270 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10271 10271 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10272 10272 }
10273 10273 (void) sprintf(&buf[strlen(buf)], " ]");
10274 10274 NDBG25(("?%s\n", buf));
10275 10275 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10276 10276 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10277 10277 cmd->cmd_pkt->pkt_state));
10278 10278 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10279 10279 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10280 10280 }
10281 10281
/*
 * Build and issue a passthrough request that has already been given a
 * slot.  The caller's request image (mptsas_pt_request_t hanging off
 * pkt_ha_private) is copied into the request frame for cmd->cmd_slot,
 * simple 64-bit SGEs are appended for the data-out and data-in buffers,
 * and — for SCSI IO / RAID passthrough functions — the sense buffer and
 * SGL offset fields are filled in before the request descriptor is
 * posted to the IOC via MPTSAS_START_CMD.
 *
 * Called with m_mutex held (the caller serializes slot assignment).
 */
static void
mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	caddr_t			memp;
	pMPI2RequestHeader_t	request_hdrp;
	struct scsi_pkt		*pkt = cmd->cmd_pkt;
	mptsas_pt_request_t	*pt = pkt->pkt_ha_private;
	uint32_t		request_size, data_size, dataout_size;
	uint32_t		direction;
	ddi_dma_cookie_t	data_cookie;
	ddi_dma_cookie_t	dataout_cookie;
	uint32_t		request_desc_low, request_desc_high = 0;
	uint32_t		i, sense_bufp;
	uint8_t			desc_type;
	uint8_t			*request, function;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;

	desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;

	request = pt->request;
	direction = pt->direction;
	request_size = pt->request_size;
	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	/*
	 * Store the passthrough message in memory location
	 * corresponding to our slot number
	 */
	memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
	request_hdrp = (pMPI2RequestHeader_t)memp;
	bzero(memp, mpt->m_req_frame_size);

	/* Copy the caller's request image into the request frame. */
	for (i = 0; i < request_size; i++) {
		bcopy(request + i, memp + i, 1);
	}

	if (data_size || dataout_size) {
		pMpi2SGESimple64_t	sgep;
		uint32_t		sge_flags;

		/* SGEs are appended directly after the request message. */
		sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
		    request_size);
		if (dataout_size) {
			/* Data-out SGE: host-to-IOC, end of buffer. */
			sge_flags = dataout_size |
			    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_END_OF_BUFFER |
			    MPI2_SGE_FLAGS_HOST_TO_IOC |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);
			ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
			ddi_put32(acc_hdl, &sgep->Address.Low,
			    (uint32_t)(dataout_cookie.dmac_laddress &
			    0xffffffffull));
			ddi_put32(acc_hdl, &sgep->Address.High,
			    (uint32_t)(dataout_cookie.dmac_laddress
			    >> 32));
			sgep++;
		}
		/* Final (data) SGE: last element, end of buffer and list. */
		sge_flags = data_size;
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
			    MPI2_SGE_FLAGS_SHIFT);
		} else {
			sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
			    MPI2_SGE_FLAGS_SHIFT);
		}
		ddi_put32(acc_hdl, &sgep->FlagsLength,
		    sge_flags);
		ddi_put32(acc_hdl, &sgep->Address.Low,
		    (uint32_t)(data_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &sgep->Address.High,
		    (uint32_t)(data_cookie.dmac_laddress >> 32));
	}

	function = request_hdrp->Function;
	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		pMpi2SCSIIORequest_t	scsi_io_req;

		scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
		/*
		 * Put SGE for data and data_out buffer at the end of
		 * scsi_io_request message header.(64 bytes in total)
		 * Following above SGEs, the residual space will be
		 * used by sense data.
		 */
		ddi_put8(acc_hdl,
		    &scsi_io_req->SenseBufferLength,
		    (uint8_t)(request_size - 64));

		/* Sense data lives at offset 64 within this slot's frame. */
		sense_bufp = mpt->m_req_frame_dma_addr +
		    (mpt->m_req_frame_size * cmd->cmd_slot);
		sense_bufp += 64;
		ddi_put32(acc_hdl,
		    &scsi_io_req->SenseBufferLowAddress, sense_bufp);

		/*
		 * Set SGLOffset0 value
		 */
		ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
		    offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);

		/*
		 * Setup descriptor info.  RAID passthrough must use the
		 * default request descriptor which is already set, so if this
		 * is a SCSI IO request, change the descriptor to SCSI IO.
		 */
		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
			desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
			request_desc_high = (ddi_get16(acc_hdl,
			    &scsi_io_req->DevHandle) << 16);
		}
	}

	/*
	 * We must wait till the message has been completed before
	 * beginning the next message so we wait for this one to
	 * finish.
	 */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) + desc_type;
	cmd->cmd_rfm = NULL;
	mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
	/* FMA: check both handles after touching hardware. */
	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
10423 10423
10424 10424
10425 10425
/*
 * Core of the MPTIOCTL passthrough path.  Copies the caller's request
 * message in from user space, allocates DMA buffers for any data-in /
 * data-out phases, obtains a command/packet pair from the IOC command
 * pool, starts the request (or queues it on the waitq if no slot is
 * free), and sleeps on m_passthru_cv until the watchdog or completion
 * path sets CFLAG_FINISHED.  Reply, sense and read data are then copied
 * back out to the caller.
 *
 * Task-management requests are special-cased up front and routed through
 * mptsas_ioc_task_management() instead.
 *
 * Entered and exited with m_mutex held; the mutex is dropped around
 * every ddi_copyin/ddi_copyout.  The pt_flags bitmask records which
 * resources were acquired so the single "out:" cleanup path can release
 * exactly those.  Returns 0 or an errno value.
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint32_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t		pt;
	mptsas_dma_alloc_state_t	data_dma_state;
	mptsas_dma_alloc_state_t	dataout_dma_state;
	caddr_t				memp;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	uint32_t			reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t		request_hdrp;
	pMPI2RequestHeader_t		request_msg;
	pMPI2DefaultReply_t		reply_msg;
	Mpi2SCSIIOReply_t		rep_msg;
	int				i, status = 0, pt_flags = 0, rv = 0;
	int				rvalue;
	uint8_t				function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Default reply is the zeroed local SCSI IO reply on the stack. */
	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	mutex_enter(&mpt->m_mutex);

	/* Task management is handled synchronously, not via a slot. */
	function = request_msg->Function;
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		pMpi2SCSITaskManagementRequest_t	task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	/* Allocate and (for writes) fill the data-in buffer. */
	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	}

	/* Allocate and fill the data-out buffer. */
	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	}

	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	/*
	 * NOTE(review): when data_size or dataout_size is 0 the
	 * corresponding *_dma_state is never initialized, so the cookie
	 * copied below is indeterminate.  It appears unused in that case
	 * (no SGE is built for a zero-length phase) — confirm.
	 */
	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp		= (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp		= (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private	= (opaque_t)&pt;
	pkt->pkt_flags		= FLAG_HEAD;
	pkt->pkt_time		= timeout;
	cmd->cmd_pkt		= pkt;
	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Wait for completion (or watchdog timeout) of the passthrough. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_PREPARED) {
		/* Locate the request frame (and trailing sense area). */
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    mpt->m_reply_frame_dma_addr));
	}

	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	/* Copy the reply (and, for SCSI IO, trailing sense) back out. */
	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		function = request_hdrp->Function;
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			sense_len = reply_size - reply_len;
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	/* Copy read data back to the caller. */
	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i, 1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		/* mptsas_remove_cmd() also returns the cmd to the pool. */
		mptsas_remove_cmd(mpt, cmd);
		pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		/* A timed-out passthrough leaves the IOC suspect; reset it. */
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);

	return (status);
}
10731 10731
10732 10732 static int
10733 10733 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10734 10734 {
10735 10735 /*
10736 10736 * If timeout is 0, set timeout to default of 60 seconds.
10737 10737 */
10738 10738 if (data->Timeout == 0) {
10739 10739 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10740 10740 }
10741 10741
10742 10742 if (((data->DataSize == 0) &&
10743 10743 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10744 10744 ((data->DataSize != 0) &&
10745 10745 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10746 10746 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10747 10747 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10748 10748 (data->DataOutSize != 0))))) {
10749 10749 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10750 10750 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10751 10751 } else {
10752 10752 data->DataOutSize = 0;
10753 10753 }
10754 10754 /*
10755 10755 * Send passthru request messages
10756 10756 */
10757 10757 return (mptsas_do_passthru(mpt,
10758 10758 (uint8_t *)((uintptr_t)data->PtrRequest),
10759 10759 (uint8_t *)((uintptr_t)data->PtrReply),
10760 10760 (uint8_t *)((uintptr_t)data->PtrData),
10761 10761 data->RequestSize, data->ReplySize,
10762 10762 data->DataSize, data->DataDirection,
10763 10763 (uint8_t *)((uintptr_t)data->PtrDataOut),
10764 10764 data->DataOutSize, data->Timeout, mode));
10765 10765 } else {
10766 10766 return (EINVAL);
10767 10767 }
10768 10768 }
10769 10769
10770 10770 static uint8_t
10771 10771 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
10772 10772 {
10773 10773 uint8_t index;
10774 10774
10775 10775 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
10776 10776 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
10777 10777 return (index);
10778 10778 }
10779 10779 }
10780 10780
10781 10781 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
10782 10782 }
10783 10783
/*
 * Build a firmware diagnostic request (DIAG_BUFFER_POST or DIAG_RELEASE)
 * in the request frame owned by cmd's slot and hand it to the IOC.
 *
 * The request to build is described by the mptsas_diag_request_t hung off
 * pkt_ha_private: diag->function selects POST vs. RELEASE and
 * diag->pBuffer describes the diag buffer involved.  Completion is
 * delivered asynchronously through the reply path, so nothing is returned
 * here; a problem with the request-frame handles is reported to FMA as
 * DDI_SERVICE_UNAFFECTED.  Called with m_mutex held.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
	struct scsi_pkt			*pkt = cmd->cmd_pkt;
	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
	uint32_t			request_desc_low, i;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* The product-specific words are copied through verbatim. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* Split the buffer's 64-bit DMA address into low/high words. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	/* The slot number rides in the upper 16 bits of the descriptor. */
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
10856 10856
/*
 * Post a firmware diagnostic buffer to the IOC and wait for the reply.
 *
 * Allocates a command/packet pair from the IOC command pool, issues a
 * DIAG_BUFFER_POST request via mptsas_start_diag() (or queues it on the
 * waitq if no slot is free), and sleeps on m_fw_diag_cv until the command
 * finishes or times out.  On success the buffer is marked valid and owned
 * by firmware.  *return_code carries an MPTSAS_FW_DIAG_ERROR_* value for
 * the ioctl caller; the DDI return value reports overall success.  Called
 * with m_mutex held.
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t		diag;
	int				status, slot_num, post_flags = 0;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	pMpi2DiagBufferPostReply_t	reply;
	uint16_t			iocstatus;
	uint32_t			iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	/* Remember that the cmd must go back to the pool on the way out. */
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the reply path signals completion (or timeout). */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* mptsas_remove_cmd() returns the cmd to the pool itself. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11006 11006
/*
 * Ask the firmware to release a diagnostic buffer and wait for the reply.
 *
 * Mirrors mptsas_post_fw_diag_buffer(): a cmd/pkt pair is taken from the
 * IOC command pool, a DIAG_RELEASE request is issued via
 * mptsas_start_diag() (or queued on the waitq), and the thread sleeps on
 * m_fw_diag_cv until completion or timeout.  diag_type distinguishes a
 * plain RELEASE from an UNREGISTER; for UNREGISTER the buffer's unique ID
 * is invalidated on success.  *return_code carries an
 * MPTSAS_FW_DIAG_ERROR_* value for the ioctl caller.  Called with m_mutex
 * held.
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t	diag;
	int			status, slot_num, rel_flags = 0;
	mptsas_cmd_t		*cmd = NULL;
	struct scsi_pkt		*pkt;
	pMpi2DiagReleaseReply_t	reply;
	uint16_t		iocstatus;
	uint32_t		iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	/* Remember that the cmd must go back to the pool on the way out. */
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the reply path signals completion (or timeout). */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given.  Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register.  But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* mptsas_remove_cmd() returns the cmd to the pool itself. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11165 11165
/*
 * Handle the FW_DIAG "register" action: validate the request, allocate a
 * contiguous DMA buffer of the requested size, and post it to firmware.
 *
 * diag_register supplies the buffer type, extended type, requested size,
 * unique ID and product-specific words from user-land.  *return_code is
 * set to an MPTSAS_FW_DIAG_ERROR_* value describing any validation or
 * post failure.  On failure the DMA buffer is freed so nothing leaks.
 * Called with m_mutex held.
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				extended_type, buffer_type, i;
	uint32_t			buffer_size;
	uint32_t			unique_id;
	int				status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	extended_type = diag_register->ExtendedType;
	buffer_type = diag_register->BufferType;
	buffer_size = diag_register->RequestedBufferSize;
	unique_id = diag_register->UniqueId;

	/*
	 * Check for valid buffer type
	 */
	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
		return (DDI_FAILURE);
	}

	/*
	 * Get the current buffer and look up the unique ID.  The unique ID
	 * should not be found.  If it is, the ID is already in use.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	/* The buffer list is indexed by buffer type, one slot per type. */
	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * The buffer's unique ID should not be registered yet, and the given
	 * unique ID cannot be 0.
	 */
	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * If this buffer is already posted as immediate, just change owner.
	 */
	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		pBuffer->immediate = FALSE;
		pBuffer->unique_id = unique_id;
		return (DDI_SUCCESS);
	}

	/*
	 * Post a new buffer after checking if it's enabled.  The DMA buffer
	 * that is allocated will be contiguous (sgl_len = 1).
	 */
	if (!pBuffer->enabled) {
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}
	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
	pBuffer->buffer_data.size = buffer_size;
	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
		    "diag buffer: size = %d bytes", buffer_size);
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}

	/*
	 * Copy the given info to the diag buffer and post the buffer.
	 */
	pBuffer->buffer_type = buffer_type;
	pBuffer->immediate = FALSE;
	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
		    i++) {
			pBuffer->product_specific[i] =
			    diag_register->ProductSpecific[i];
		}
	}
	pBuffer->extended_type = extended_type;
	pBuffer->unique_id = unique_id;
	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
	    DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
		    "mptsas_diag_register.");
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		status = DDI_FAILURE;
	}

	/*
	 * In case there was a failure, free the DMA buffer.
	 */
	if (status == DDI_FAILURE) {
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}
11272 11272
/*
 * Handle the FW_DIAG "unregister" action for the buffer named by
 * diag_unregister->UniqueId.
 *
 * If firmware currently owns the buffer it is released first; the DMA
 * memory is freed only when that release succeeds, so firmware is never
 * left holding a pointer to freed memory.  The unique ID is invalidated
 * regardless of the outcome.  *return_code is set to an
 * MPTSAS_FW_DIAG_ERROR_* value on failure.  Called with m_mutex held.
 */
static int
mptsas_diag_unregister(mptsas_t *mpt,
    mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				i;
	uint32_t			unique_id;
	int				status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	unique_id = diag_unregister->UniqueId;

	/*
	 * Get the current buffer and look up the unique ID.  The unique ID
	 * should be there.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	pBuffer = &mpt->m_fw_diag_buffer_list[i];

	/*
	 * Try to release the buffer from FW before freeing it.  If release
	 * fails, don't free the DMA buffer in case FW tries to access it
	 * later.  If buffer is not owned by firmware, can't release it.
	 */
	if (!pBuffer->owned_by_firmware) {
		status = DDI_SUCCESS;
	} else {
		status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
		    return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
	}

	/*
	 * At this point, return the current status no matter what happens with
	 * the DMA buffer.
	 */
	pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
	if (status == DDI_SUCCESS) {
		if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
		    DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
			    "in mptsas_diag_unregister.");
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
		}
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}
11328 11328
11329 11329 static int
11330 11330 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
11331 11331 uint32_t *return_code)
11332 11332 {
11333 11333 mptsas_fw_diagnostic_buffer_t *pBuffer;
11334 11334 uint8_t i;
11335 11335 uint32_t unique_id;
11336 11336
11337 11337 ASSERT(mutex_owned(&mpt->m_mutex));
11338 11338
11339 11339 unique_id = diag_query->UniqueId;
11340 11340
11341 11341 /*
11342 11342 * If ID is valid, query on ID.
11343 11343 * If ID is invalid, query on buffer type.
11344 11344 */
11345 11345 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
11346 11346 i = diag_query->BufferType;
11347 11347 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
11348 11348 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11349 11349 return (DDI_FAILURE);
11350 11350 }
11351 11351 } else {
11352 11352 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11353 11353 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11354 11354 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11355 11355 return (DDI_FAILURE);
11356 11356 }
11357 11357 }
11358 11358
11359 11359 /*
11360 11360 * Fill query structure with the diag buffer info.
11361 11361 */
11362 11362 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11363 11363 diag_query->BufferType = pBuffer->buffer_type;
11364 11364 diag_query->ExtendedType = pBuffer->extended_type;
11365 11365 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
11366 11366 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
11367 11367 i++) {
11368 11368 diag_query->ProductSpecific[i] =
11369 11369 pBuffer->product_specific[i];
11370 11370 }
11371 11371 }
11372 11372 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
11373 11373 diag_query->DriverAddedBufferSize = 0;
11374 11374 diag_query->UniqueId = pBuffer->unique_id;
11375 11375 diag_query->ApplicationFlags = 0;
11376 11376 diag_query->DiagnosticFlags = 0;
11377 11377
11378 11378 /*
11379 11379 * Set/Clear application flags
11380 11380 */
11381 11381 if (pBuffer->immediate) {
11382 11382 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11383 11383 } else {
11384 11384 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11385 11385 }
11386 11386 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
11387 11387 diag_query->ApplicationFlags |=
11388 11388 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11389 11389 } else {
11390 11390 diag_query->ApplicationFlags &=
11391 11391 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11392 11392 }
11393 11393 if (pBuffer->owned_by_firmware) {
11394 11394 diag_query->ApplicationFlags |=
11395 11395 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11396 11396 } else {
11397 11397 diag_query->ApplicationFlags &=
11398 11398 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11399 11399 }
11400 11400
11401 11401 return (DDI_SUCCESS);
11402 11402 }
11403 11403
11404 11404 static int
11405 11405 mptsas_diag_read_buffer(mptsas_t *mpt,
11406 11406 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
11407 11407 uint32_t *return_code, int ioctl_mode)
11408 11408 {
11409 11409 mptsas_fw_diagnostic_buffer_t *pBuffer;
11410 11410 uint8_t i, *pData;
11411 11411 uint32_t unique_id, byte;
11412 11412 int status;
11413 11413
11414 11414 ASSERT(mutex_owned(&mpt->m_mutex));
11415 11415
11416 11416 unique_id = diag_read_buffer->UniqueId;
11417 11417
11418 11418 /*
11419 11419 * Get the current buffer and look up the unique ID. The unique ID
11420 11420 * should be there.
11421 11421 */
11422 11422 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11423 11423 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11424 11424 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11425 11425 return (DDI_FAILURE);
11426 11426 }
11427 11427
11428 11428 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11429 11429
11430 11430 /*
11431 11431 * Make sure requested read is within limits
11432 11432 */
11433 11433 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
11434 11434 pBuffer->buffer_data.size) {
11435 11435 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11436 11436 return (DDI_FAILURE);
11437 11437 }
11438 11438
11439 11439 /*
11440 11440 * Copy the requested data from DMA to the diag_read_buffer. The DMA
11441 11441 * buffer that was allocated is one contiguous buffer.
11442 11442 */
11443 11443 pData = (uint8_t *)(pBuffer->buffer_data.memp +
11444 11444 diag_read_buffer->StartingOffset);
11445 11445 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
11446 11446 DDI_DMA_SYNC_FORCPU);
11447 11447 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
11448 11448 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
11449 11449 != 0) {
11450 11450 return (DDI_FAILURE);
11451 11451 }
11452 11452 }
11453 11453 diag_read_buffer->Status = 0;
11454 11454
11455 11455 /*
11456 11456 * Set or clear the Force Release flag.
11457 11457 */
11458 11458 if (pBuffer->force_release) {
11459 11459 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11460 11460 } else {
11461 11461 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11462 11462 }
11463 11463
11464 11464 /*
11465 11465 * If buffer is to be reregistered, make sure it's not already owned by
11466 11466 * firmware first.
11467 11467 */
11468 11468 status = DDI_SUCCESS;
11469 11469 if (!pBuffer->owned_by_firmware) {
11470 11470 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
11471 11471 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
11472 11472 return_code);
11473 11473 }
11474 11474 }
11475 11475
11476 11476 return (status);
11477 11477 }
11478 11478
11479 11479 static int
11480 11480 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
11481 11481 uint32_t *return_code)
11482 11482 {
11483 11483 mptsas_fw_diagnostic_buffer_t *pBuffer;
11484 11484 uint8_t i;
11485 11485 uint32_t unique_id;
11486 11486 int status;
11487 11487
11488 11488 ASSERT(mutex_owned(&mpt->m_mutex));
11489 11489
11490 11490 unique_id = diag_release->UniqueId;
11491 11491
11492 11492 /*
11493 11493 * Get the current buffer and look up the unique ID. The unique ID
11494 11494 * should be there.
11495 11495 */
11496 11496 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11497 11497 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11498 11498 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11499 11499 return (DDI_FAILURE);
11500 11500 }
11501 11501
11502 11502 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11503 11503
11504 11504 /*
11505 11505 * If buffer is not owned by firmware, it's already been released.
11506 11506 */
11507 11507 if (!pBuffer->owned_by_firmware) {
11508 11508 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
11509 11509 return (DDI_FAILURE);
11510 11510 }
11511 11511
11512 11512 /*
11513 11513 * Release the buffer.
11514 11514 */
11515 11515 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
11516 11516 MPTSAS_FW_DIAG_TYPE_RELEASE);
11517 11517 return (status);
11518 11518 }
11519 11519
11520 11520 static int
11521 11521 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
11522 11522 uint32_t length, uint32_t *return_code, int ioctl_mode)
11523 11523 {
11524 11524 mptsas_fw_diag_register_t diag_register;
11525 11525 mptsas_fw_diag_unregister_t diag_unregister;
11526 11526 mptsas_fw_diag_query_t diag_query;
11527 11527 mptsas_diag_read_buffer_t diag_read_buffer;
11528 11528 mptsas_fw_diag_release_t diag_release;
11529 11529 int status = DDI_SUCCESS;
11530 11530 uint32_t original_return_code, read_buf_len;
11531 11531
11532 11532 ASSERT(mutex_owned(&mpt->m_mutex));
11533 11533
11534 11534 original_return_code = *return_code;
11535 11535 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11536 11536
11537 11537 switch (action) {
11538 11538 case MPTSAS_FW_DIAG_TYPE_REGISTER:
11539 11539 if (!length) {
11540 11540 *return_code =
11541 11541 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11542 11542 status = DDI_FAILURE;
11543 11543 break;
11544 11544 }
11545 11545 if (ddi_copyin(diag_action, &diag_register,
11546 11546 sizeof (diag_register), ioctl_mode) != 0) {
11547 11547 return (DDI_FAILURE);
11548 11548 }
11549 11549 status = mptsas_diag_register(mpt, &diag_register,
11550 11550 return_code);
11551 11551 break;
11552 11552
11553 11553 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
11554 11554 if (length < sizeof (diag_unregister)) {
11555 11555 *return_code =
11556 11556 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11557 11557 status = DDI_FAILURE;
11558 11558 break;
11559 11559 }
11560 11560 if (ddi_copyin(diag_action, &diag_unregister,
11561 11561 sizeof (diag_unregister), ioctl_mode) != 0) {
11562 11562 return (DDI_FAILURE);
11563 11563 }
11564 11564 status = mptsas_diag_unregister(mpt, &diag_unregister,
11565 11565 return_code);
11566 11566 break;
11567 11567
11568 11568 case MPTSAS_FW_DIAG_TYPE_QUERY:
11569 11569 if (length < sizeof (diag_query)) {
11570 11570 *return_code =
11571 11571 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11572 11572 status = DDI_FAILURE;
11573 11573 break;
11574 11574 }
11575 11575 if (ddi_copyin(diag_action, &diag_query,
11576 11576 sizeof (diag_query), ioctl_mode) != 0) {
11577 11577 return (DDI_FAILURE);
11578 11578 }
11579 11579 status = mptsas_diag_query(mpt, &diag_query,
11580 11580 return_code);
11581 11581 if (status == DDI_SUCCESS) {
11582 11582 if (ddi_copyout(&diag_query, diag_action,
11583 11583 sizeof (diag_query), ioctl_mode) != 0) {
11584 11584 return (DDI_FAILURE);
11585 11585 }
11586 11586 }
11587 11587 break;
11588 11588
11589 11589 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
11590 11590 if (ddi_copyin(diag_action, &diag_read_buffer,
11591 11591 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
11592 11592 return (DDI_FAILURE);
11593 11593 }
11594 11594 read_buf_len = sizeof (diag_read_buffer) -
11595 11595 sizeof (diag_read_buffer.DataBuffer) +
11596 11596 diag_read_buffer.BytesToRead;
11597 11597 if (length < read_buf_len) {
11598 11598 *return_code =
11599 11599 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11600 11600 status = DDI_FAILURE;
11601 11601 break;
11602 11602 }
11603 11603 status = mptsas_diag_read_buffer(mpt,
11604 11604 &diag_read_buffer, diag_action +
11605 11605 sizeof (diag_read_buffer) - 4, return_code,
11606 11606 ioctl_mode);
11607 11607 if (status == DDI_SUCCESS) {
11608 11608 if (ddi_copyout(&diag_read_buffer, diag_action,
11609 11609 sizeof (diag_read_buffer) - 4, ioctl_mode)
11610 11610 != 0) {
11611 11611 return (DDI_FAILURE);
11612 11612 }
11613 11613 }
11614 11614 break;
11615 11615
11616 11616 case MPTSAS_FW_DIAG_TYPE_RELEASE:
11617 11617 if (length < sizeof (diag_release)) {
11618 11618 *return_code =
11619 11619 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11620 11620 status = DDI_FAILURE;
11621 11621 break;
11622 11622 }
11623 11623 if (ddi_copyin(diag_action, &diag_release,
11624 11624 sizeof (diag_release), ioctl_mode) != 0) {
11625 11625 return (DDI_FAILURE);
11626 11626 }
11627 11627 status = mptsas_diag_release(mpt, &diag_release,
11628 11628 return_code);
11629 11629 break;
11630 11630
11631 11631 default:
11632 11632 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11633 11633 status = DDI_FAILURE;
11634 11634 break;
11635 11635 }
11636 11636
11637 11637 if ((status == DDI_FAILURE) &&
11638 11638 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
11639 11639 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
11640 11640 status = DDI_SUCCESS;
11641 11641 }
11642 11642
11643 11643 return (status);
11644 11644 }
11645 11645
11646 11646 static int
11647 11647 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
11648 11648 {
11649 11649 int status;
11650 11650 mptsas_diag_action_t driver_data;
11651 11651
11652 11652 ASSERT(mutex_owned(&mpt->m_mutex));
11653 11653
11654 11654 /*
11655 11655 * Copy the user data to a driver data buffer.
11656 11656 */
11657 11657 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
11658 11658 mode) == 0) {
11659 11659 /*
11660 11660 * Send diag action request if Action is valid
11661 11661 */
11662 11662 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
11663 11663 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
11664 11664 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
11665 11665 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
11666 11666 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
11667 11667 status = mptsas_do_diag_action(mpt, driver_data.Action,
11668 11668 (void *)(uintptr_t)driver_data.PtrDiagAction,
11669 11669 driver_data.Length, &driver_data.ReturnCode,
11670 11670 mode);
11671 11671 if (status == DDI_SUCCESS) {
11672 11672 if (ddi_copyout(&driver_data.ReturnCode,
11673 11673 &user_data->ReturnCode,
11674 11674 sizeof (user_data->ReturnCode), mode)
11675 11675 != 0) {
11676 11676 status = EFAULT;
11677 11677 } else {
11678 11678 status = 0;
11679 11679 }
11680 11680 } else {
11681 11681 status = EIO;
11682 11682 }
11683 11683 } else {
11684 11684 status = EINVAL;
11685 11685 }
11686 11686 } else {
11687 11687 status = EFAULT;
11688 11688 }
11689 11689
11690 11690 return (status);
11691 11691 }
11692 11692
11693 11693 /*
11694 11694 * This routine handles the "event query" ioctl.
11695 11695 */
11696 11696 static int
11697 11697 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11698 11698 int *rval)
11699 11699 {
11700 11700 int status;
11701 11701 mptsas_event_query_t driverdata;
11702 11702 uint8_t i;
11703 11703
11704 11704 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11705 11705
11706 11706 mutex_enter(&mpt->m_mutex);
11707 11707 for (i = 0; i < 4; i++) {
11708 11708 driverdata.Types[i] = mpt->m_event_mask[i];
11709 11709 }
11710 11710 mutex_exit(&mpt->m_mutex);
11711 11711
11712 11712 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11713 11713 status = EFAULT;
11714 11714 } else {
11715 11715 *rval = MPTIOCTL_STATUS_GOOD;
11716 11716 status = 0;
11717 11717 }
11718 11718
11719 11719 return (status);
11720 11720 }
11721 11721
11722 11722 /*
11723 11723 * This routine handles the "event enable" ioctl.
11724 11724 */
11725 11725 static int
11726 11726 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11727 11727 int *rval)
11728 11728 {
11729 11729 int status;
11730 11730 mptsas_event_enable_t driverdata;
11731 11731 uint8_t i;
11732 11732
11733 11733 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11734 11734 mutex_enter(&mpt->m_mutex);
11735 11735 for (i = 0; i < 4; i++) {
11736 11736 mpt->m_event_mask[i] = driverdata.Types[i];
11737 11737 }
11738 11738 mutex_exit(&mpt->m_mutex);
11739 11739
11740 11740 *rval = MPTIOCTL_STATUS_GOOD;
11741 11741 status = 0;
11742 11742 } else {
11743 11743 status = EFAULT;
11744 11744 }
11745 11745 return (status);
11746 11746 }
11747 11747
11748 11748 /*
11749 11749 * This routine handles the "event report" ioctl.
11750 11750 */
11751 11751 static int
11752 11752 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
11753 11753 int *rval)
11754 11754 {
11755 11755 int status;
11756 11756 mptsas_event_report_t driverdata;
11757 11757
11758 11758 mutex_enter(&mpt->m_mutex);
11759 11759
11760 11760 if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
11761 11761 mode) == 0) {
11762 11762 if (driverdata.Size >= sizeof (mpt->m_events)) {
11763 11763 if (ddi_copyout(mpt->m_events, data->Events,
11764 11764 sizeof (mpt->m_events), mode) != 0) {
11765 11765 status = EFAULT;
11766 11766 } else {
11767 11767 if (driverdata.Size > sizeof (mpt->m_events)) {
11768 11768 driverdata.Size =
11769 11769 sizeof (mpt->m_events);
11770 11770 if (ddi_copyout(&driverdata.Size,
11771 11771 &data->Size,
11772 11772 sizeof (driverdata.Size),
11773 11773 mode) != 0) {
11774 11774 status = EFAULT;
11775 11775 } else {
11776 11776 *rval = MPTIOCTL_STATUS_GOOD;
11777 11777 status = 0;
11778 11778 }
11779 11779 } else {
11780 11780 *rval = MPTIOCTL_STATUS_GOOD;
11781 11781 status = 0;
11782 11782 }
11783 11783 }
11784 11784 } else {
11785 11785 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
11786 11786 status = 0;
11787 11787 }
11788 11788 } else {
11789 11789 status = EFAULT;
11790 11790 }
11791 11791
11792 11792 mutex_exit(&mpt->m_mutex);
11793 11793 return (status);
11794 11794 }
11795 11795
11796 11796 static void
11797 11797 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11798 11798 {
11799 11799 int *reg_data;
11800 11800 uint_t reglen;
11801 11801
11802 11802 /*
11803 11803 * Lookup the 'reg' property and extract the other data
11804 11804 */
11805 11805 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11806 11806 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11807 11807 DDI_PROP_SUCCESS) {
11808 11808 /*
11809 11809 * Extract the PCI data from the 'reg' property first DWORD.
11810 11810 * The entry looks like the following:
11811 11811 * First DWORD:
11812 11812 * Bits 0 - 7 8-bit Register number
11813 11813 * Bits 8 - 10 3-bit Function number
11814 11814 * Bits 11 - 15 5-bit Device number
11815 11815 * Bits 16 - 23 8-bit Bus number
11816 11816 * Bits 24 - 25 2-bit Address Space type identifier
11817 11817 *
11818 11818 */
11819 11819 adapter_data->PciInformation.u.bits.BusNumber =
11820 11820 (reg_data[0] & 0x00FF0000) >> 16;
11821 11821 adapter_data->PciInformation.u.bits.DeviceNumber =
11822 11822 (reg_data[0] & 0x0000F800) >> 11;
11823 11823 adapter_data->PciInformation.u.bits.FunctionNumber =
11824 11824 (reg_data[0] & 0x00000700) >> 8;
11825 11825 ddi_prop_free((void *)reg_data);
11826 11826 } else {
11827 11827 /*
11828 11828 * If we can't determine the PCI data then we fill in FF's for
11829 11829 * the data to indicate this.
11830 11830 */
11831 11831 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
11832 11832 adapter_data->MpiPortNumber = 0xFFFFFFFF;
11833 11833 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
11834 11834 }
11835 11835
11836 11836 /*
11837 11837 * Saved in the mpt->m_fwversion
11838 11838 */
11839 11839 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
11840 11840 }
11841 11841
11842 11842 static void
11843 11843 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11844 11844 {
11845 11845 char *driver_verstr = MPTSAS_MOD_STRING;
11846 11846
11847 11847 mptsas_lookup_pci_data(mpt, adapter_data);
11848 11848 adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
11849 11849 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
11850 11850 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
11851 11851 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
11852 11852 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
11853 11853 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
11854 11854 adapter_data->BiosVersion = 0;
11855 11855 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
11856 11856 }
11857 11857
11858 11858 static void
11859 11859 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
11860 11860 {
11861 11861 int *reg_data, i;
11862 11862 uint_t reglen;
11863 11863
11864 11864 /*
11865 11865 * Lookup the 'reg' property and extract the other data
11866 11866 */
11867 11867 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11868 11868 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11869 11869 DDI_PROP_SUCCESS) {
11870 11870 /*
11871 11871 * Extract the PCI data from the 'reg' property first DWORD.
11872 11872 * The entry looks like the following:
11873 11873 * First DWORD:
11874 11874 * Bits 8 - 10 3-bit Function number
11875 11875 * Bits 11 - 15 5-bit Device number
11876 11876 * Bits 16 - 23 8-bit Bus number
11877 11877 */
11878 11878 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
11879 11879 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
11880 11880 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
11881 11881 ddi_prop_free((void *)reg_data);
11882 11882 } else {
11883 11883 /*
11884 11884 * If we can't determine the PCI info then we fill in FF's for
11885 11885 * the data to indicate this.
11886 11886 */
11887 11887 pci_info->BusNumber = 0xFFFFFFFF;
11888 11888 pci_info->DeviceNumber = 0xFF;
11889 11889 pci_info->FunctionNumber = 0xFF;
11890 11890 }
11891 11891
11892 11892 /*
11893 11893 * Now get the interrupt vector and the pci header. The vector can
11894 11894 * only be 0 right now. The header is the first 256 bytes of config
11895 11895 * space.
11896 11896 */
11897 11897 pci_info->InterruptVector = 0;
11898 11898 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
11899 11899 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
11900 11900 i);
11901 11901 }
11902 11902 }
11903 11903
11904 11904 static int
11905 11905 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
11906 11906 {
11907 11907 int status = 0;
11908 11908 mptsas_reg_access_t driverdata;
11909 11909
11910 11910 mutex_enter(&mpt->m_mutex);
11911 11911 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11912 11912 switch (driverdata.Command) {
11913 11913 /*
11914 11914 * IO access is not supported.
11915 11915 */
11916 11916 case REG_IO_READ:
11917 11917 case REG_IO_WRITE:
11918 11918 mptsas_log(mpt, CE_WARN, "IO access is not "
11919 11919 "supported. Use memory access.");
11920 11920 status = EINVAL;
11921 11921 break;
11922 11922
11923 11923 case REG_MEM_READ:
11924 11924 driverdata.RegData = ddi_get32(mpt->m_datap,
11925 11925 (uint32_t *)(void *)mpt->m_reg +
11926 11926 driverdata.RegOffset);
11927 11927 if (ddi_copyout(&driverdata.RegData,
11928 11928 &data->RegData,
11929 11929 sizeof (driverdata.RegData), mode) != 0) {
11930 11930 mptsas_log(mpt, CE_WARN, "Register "
11931 11931 "Read Failed");
11932 11932 status = EFAULT;
11933 11933 }
11934 11934 break;
11935 11935
11936 11936 case REG_MEM_WRITE:
11937 11937 ddi_put32(mpt->m_datap,
11938 11938 (uint32_t *)(void *)mpt->m_reg +
11939 11939 driverdata.RegOffset,
11940 11940 driverdata.RegData);
11941 11941 break;
11942 11942
11943 11943 default:
11944 11944 status = EINVAL;
11945 11945 break;
↓ open down ↓ |
5479 lines elided |
↑ open up ↑ |
11946 11946 }
11947 11947 } else {
11948 11948 status = EFAULT;
11949 11949 }
11950 11950
11951 11951 mutex_exit(&mpt->m_mutex);
11952 11952 return (status);
11953 11953 }
11954 11954
11955 11955 static int
11956 +led_control(mptsas_t *mpt, intptr_t data, int mode)
11957 +{
11958 + int ret = 0;
11959 + mptsas_led_control_t lc;
11960 + mptsas_target_t *ptgt;
11961 +
11962 + if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
11963 + return (EFAULT);
11964 + }
11965 +
11966 + if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
11967 + lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
11968 + lc.Led < MPTSAS_LEDCTL_LED_IDENT ||
11969 + lc.Led > MPTSAS_LEDCTL_LED_OK2RM ||
11970 + (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
11971 + lc.LedStatus != 1)) {
11972 + return (EINVAL);
11973 + }
11974 +
11975 + /* Locate the target we're interrogating... */
11976 + mutex_enter(&mpt->m_mutex);
11977 + ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
11978 + MPTSAS_HASH_FIRST);
11979 + while (ptgt != NULL) {
11980 + if (ptgt->m_enclosure == lc.Enclosure &&
11981 + ptgt->m_slot_num == lc.Slot) {
11982 + break;
11983 + }
11984 + ptgt = (mptsas_target_t *)mptsas_hash_traverse(
11985 + &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
11986 + }
11987 + if (ptgt == NULL) {
11988 + /* We could not find a target for that enclosure/slot. */
11989 + mutex_exit(&mpt->m_mutex);
11990 + return (ENOENT);
11991 + }
11992 +
11993 + if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
11994 + /* Update our internal LED state. */
11995 + ptgt->m_led_status &= ~(1 << (lc.Led - 1));
11996 + ptgt->m_led_status |= lc.LedStatus << (lc.Led - 1);
11997 +
11998 + /* Flush it to the controller. */
11999 + ret = mptsas_flush_led_status(mpt, ptgt);
12000 + mutex_exit(&mpt->m_mutex);
12001 + return (ret);
12002 + }
12003 +
12004 + /* Return our internal LED state. */
12005 + lc.LedStatus = (ptgt->m_led_status >> (lc.Led - 1)) & 1;
12006 + mutex_exit(&mpt->m_mutex);
12007 +
12008 + if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12009 + return (EFAULT);
12010 + }
12011 +
12012 + return (0);
12013 +}
12014 +
/*
 * Handler for MPTIOCTL_GET_DISK_INFO: report instance/enclosure/slot/WWN
 * for every target on this HBA.  If the caller passes a NULL array
 * pointer, only the target count is returned; if the supplied buffer is
 * too small, ENOSPC is returned along with the required count.  Uses the
 * STRUCT_* macros so both 32- and 64-bit callers are handled.
 */
static int
get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
{
	int i = 0;
	int count = 0;
	int ret = 0;
	mptsas_target_t *ptgt;
	mptsas_disk_info_t *di;
	STRUCT_DECL(mptsas_get_disk_info, gdi);

	/* Shape the user structure for the caller's data model. */
	STRUCT_INIT(gdi, get_udatamodel());

	if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
	    mode) != 0) {
		return (EFAULT);
	}

	/* Find out how many targets there are. */
	mutex_enter(&mpt->m_mutex);
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		count++;
		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * If we haven't been asked to copy out information on each target,
	 * then just return the count.
	 */
	STRUCT_FSET(gdi, DiskCount, count);
	if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
		goto copy_out;

	/*
	 * If we haven't been given a large enough buffer to copy out into,
	 * let the caller know.
	 */
	if (STRUCT_FGET(gdi, DiskInfoArraySize) <
	    count * sizeof (mptsas_disk_info_t)) {
		ret = ENOSPC;
		goto copy_out;
	}

	di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);

	/*
	 * Walk the target table again (the mutex was dropped while
	 * allocating, so the table may have changed) and gather the
	 * per-disk records.
	 */
	mutex_enter(&mpt->m_mutex);
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		if (i >= count) {
			/*
			 * The number of targets changed while we weren't
			 * looking, so give up.
			 */
			mutex_exit(&mpt->m_mutex);
			kmem_free(di, count * sizeof (mptsas_disk_info_t));
			return (EAGAIN);
		}
		di[i].Instance = mpt->m_instance;
		di[i].Enclosure = ptgt->m_enclosure;
		di[i].Slot = ptgt->m_slot_num;
		di[i].SasAddress = ptgt->m_sas_wwn;

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
		i++;
	}
	mutex_exit(&mpt->m_mutex);
	/* Report the number of records actually gathered. */
	STRUCT_FSET(gdi, DiskCount, i);

	/* Copy out the disk information to the caller. */
	if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
	    i * sizeof (mptsas_disk_info_t), mode) != 0) {
		ret = EFAULT;
	}

	kmem_free(di, count * sizeof (mptsas_disk_info_t));

copy_out:
	if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
	    mode) != 0) {
		ret = EFAULT;
	}

	return (ret);
}
12044 12104
/*
 * Top-level ioctl entry point for both the HBA node and its iport nodes.
 * Requires sys_config privilege.  For iport minors the request is passed
 * to scsi_hba_ioctl() and, for DEVCTL_DEVICE_ONLINE/OFFLINE, the target's
 * OK2RM LED state is updated as a side effect (without affecting the
 * ioctl's return status).  For the HBA minor the MPTIOCTL_* commands are
 * dispatched; anything unrecognized falls through to scsi_hba_ioctl().
 */
static int
mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
    int *rval)
{
	int status = 0;
	mptsas_t *mpt;
	mptsas_update_flash_t flashdata;
	mptsas_pass_thru_t passthru_data;
	mptsas_adapter_data_t adapter_data;
	mptsas_pci_info_t pci_info;
	int copylen;

	int iport_flag = 0;
	dev_info_t *dip = NULL;
	mptsas_phymask_t phymask = 0;
	struct devctl_iocdata *dcp = NULL;
	uint32_t slotstatus = 0;
	char *addr = NULL;
	mptsas_target_t *ptgt = NULL;

	*rval = MPTIOCTL_STATUS_GOOD;
	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
		return (EPERM);
	}

	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
	if (mpt == NULL) {
		/*
		 * Called from iport node, get the states
		 */
		iport_flag = 1;
		dip = mptsas_get_dip_from_dev(dev, &phymask);
		if (dip == NULL) {
			return (ENXIO);
		}
		mpt = DIP2MPT(dip);
	}
	/* Make sure power level is D0 before accessing registers */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(mpt->m_dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			mutex_exit(&mpt->m_mutex);
			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: mptsas_ioctl: Raise power "
				    "request failed.", mpt->m_instance);
				(void) pm_idle_component(mpt->m_dip, 0);
				return (ENXIO);
			}
		} else {
			mutex_exit(&mpt->m_mutex);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	if (iport_flag) {
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
		if (status != 0) {
			goto out;
		}
		/*
		 * The following code control the OK2RM LED, it doesn't affect
		 * the ioctl return status.
		 */
		if ((cmd == DEVCTL_DEVICE_ONLINE) ||
		    (cmd == DEVCTL_DEVICE_OFFLINE)) {
			if (ndi_dc_allochdl((void *)data, &dcp) !=
			    NDI_SUCCESS) {
				goto out;
			}
			addr = ndi_dc_getaddr(dcp);
			ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
			if (ptgt == NULL) {
				NDBG14(("mptsas_ioctl led control: tgt %s not "
				    "found", addr));
				ndi_dc_freehdl(dcp);
				goto out;
			}
			mutex_enter(&mpt->m_mutex);
			if (cmd == DEVCTL_DEVICE_ONLINE) {
				ptgt->m_tgt_unconfigured = 0;
			} else if (cmd == DEVCTL_DEVICE_OFFLINE) {
				ptgt->m_tgt_unconfigured = 1;
			}
			/*
			 * Track OK2RM in the cached per-target LED state
			 * so it stays consistent with MPTIOCTL_LED_CONTROL,
			 * then push the new state to the enclosure.
			 */
			if (cmd == DEVCTL_DEVICE_OFFLINE) {
				ptgt->m_led_status |=
				    (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
			} else {
				ptgt->m_led_status &=
				    ~(1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
			}
			if (mptsas_flush_led_status(mpt, ptgt) != DDI_SUCCESS) {
				NDBG14(("mptsas_ioctl: set LED for tgt %s "
				    "failed %x", addr, slotstatus));
			}
			mutex_exit(&mpt->m_mutex);
			ndi_dc_freehdl(dcp);
		}
		goto out;
	}
	switch (cmd) {
		case MPTIOCTL_GET_DISK_INFO:
			status = get_disk_info(mpt, data, mode);
			break;
		case MPTIOCTL_LED_CONTROL:
			status = led_control(mpt, data, mode);
			break;
		case MPTIOCTL_UPDATE_FLASH:
			if (ddi_copyin((void *)data, &flashdata,
				sizeof (struct mptsas_update_flash), mode)) {
				status = EFAULT;
				break;
			}

			mutex_enter(&mpt->m_mutex);
			if (mptsas_update_flash(mpt,
			    (caddr_t)(long)flashdata.PtrBuffer,
			    flashdata.ImageSize, flashdata.ImageType, mode)) {
				status = EFAULT;
			}

			/*
			 * Reset the chip to start using the new
			 * firmware.  Reset if failed also.
			 */
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				status = EFAULT;
			}
			mutex_exit(&mpt->m_mutex);
			break;
		case MPTIOCTL_PASS_THRU:
			/*
			 * The user has requested to pass through a command to
			 * be executed by the MPT firmware.  Call our routine
			 * which does this.  Only allow one passthru IOCTL at
			 * one time. Other threads will block on
			 * m_passthru_mutex, which is of adaptive variant.
			 */
			if (ddi_copyin((void *)data, &passthru_data,
			    sizeof (mptsas_pass_thru_t), mode)) {
				status = EFAULT;
				break;
			}
			mutex_enter(&mpt->m_passthru_mutex);
			mutex_enter(&mpt->m_mutex);
			status = mptsas_pass_thru(mpt, &passthru_data, mode);
			mutex_exit(&mpt->m_mutex);
			mutex_exit(&mpt->m_passthru_mutex);

			break;
		case MPTIOCTL_GET_ADAPTER_DATA:
			/*
			 * The user has requested to read adapter data.  Call
			 * our routine which does this.
			 */
			bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
			if (ddi_copyin((void *)data, (void *)&adapter_data,
			    sizeof (mptsas_adapter_data_t), mode)) {
				status = EFAULT;
				break;
			}
			if (adapter_data.StructureLength >=
			    sizeof (mptsas_adapter_data_t)) {
				adapter_data.StructureLength = (uint32_t)
				    sizeof (mptsas_adapter_data_t);
				copylen = sizeof (mptsas_adapter_data_t);
				mutex_enter(&mpt->m_mutex);
				mptsas_read_adapter_data(mpt, &adapter_data);
				mutex_exit(&mpt->m_mutex);
			} else {
				/*
				 * Caller's buffer is too small; only the
				 * required length is copied back.
				 */
				adapter_data.StructureLength = (uint32_t)
				    sizeof (mptsas_adapter_data_t);
				copylen = sizeof (adapter_data.StructureLength);
				*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
			}
			if (ddi_copyout((void *)(&adapter_data), (void *)data,
			    copylen, mode) != 0) {
				status = EFAULT;
			}
			break;
		case MPTIOCTL_GET_PCI_INFO:
			/*
			 * The user has requested to read pci info.  Call
			 * our routine which does this.
			 */
			bzero(&pci_info, sizeof (mptsas_pci_info_t));
			mutex_enter(&mpt->m_mutex);
			mptsas_read_pci_info(mpt, &pci_info);
			mutex_exit(&mpt->m_mutex);
			if (ddi_copyout((void *)(&pci_info), (void *)data,
			    sizeof (mptsas_pci_info_t), mode) != 0) {
				status = EFAULT;
			}
			break;
		case MPTIOCTL_RESET_ADAPTER:
			mutex_enter(&mpt->m_mutex);
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
				    "failed");
				status = EFAULT;
			}
			mutex_exit(&mpt->m_mutex);
			break;
		case MPTIOCTL_DIAG_ACTION:
			/*
			 * The user has done a diag buffer action.  Call our
			 * routine which does this.  Only allow one diag action
			 * at one time.
			 */
			mutex_enter(&mpt->m_mutex);
			if (mpt->m_diag_action_in_progress) {
				mutex_exit(&mpt->m_mutex);
				return (EBUSY);
			}
			mpt->m_diag_action_in_progress = 1;
			status = mptsas_diag_action(mpt,
			    (mptsas_diag_action_t *)data, mode);
			mpt->m_diag_action_in_progress = 0;
			mutex_exit(&mpt->m_mutex);
			break;
		case MPTIOCTL_EVENT_QUERY:
			/*
			 * The user has done an event query. Call our routine
			 * which does this.
			 */
			status = mptsas_event_query(mpt,
			    (mptsas_event_query_t *)data, mode, rval);
			break;
		case MPTIOCTL_EVENT_ENABLE:
			/*
			 * The user has done an event enable. Call our routine
			 * which does this.
			 */
			status = mptsas_event_enable(mpt,
			    (mptsas_event_enable_t *)data, mode, rval);
			break;
		case MPTIOCTL_EVENT_REPORT:
			/*
			 * The user has done an event report. Call our routine
			 * which does this.
			 */
			status = mptsas_event_report(mpt,
			    (mptsas_event_report_t *)data, mode, rval);
			break;
		case MPTIOCTL_REG_ACCESS:
			/*
			 * The user has requested register access.  Call our
			 * routine which does this.
			 */
			status = mptsas_reg_access(mpt,
			    (mptsas_reg_access_t *)data, mode);
			break;
		default:
			status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
			    rval);
			break;
	}

out:
	/* Balance the pm_busy_component() taken above. */
	if (mpt->m_options & MPTSAS_OPT_PM)
		(void) pm_idle_component(mpt->m_dip, 0);
	return (status);
}
12327 12373
/*
 * Reset and reinitialize the IOC (used after a firmware flash and for
 * MPTIOCTL_RESET_ADAPTER).  Must be called with m_mutex held.  Holds all
 * target throttles, quiesces interrupts, flushes outstanding commands,
 * reinitializes the chip, then restores throttles and restarts I/O.
 * Returns DDI_SUCCESS or DDI_FAILURE from the chip reinit.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int rval = DDI_SUCCESS;
	mptsas_target_t *ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD
	 */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
		mutex_exit(&ptgt->m_tgt_intr_mutex);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles
	 */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mutex_enter(&ptgt->m_tgt_intr_mutex);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		mutex_exit(&ptgt->m_tgt_intr_mutex);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* Drain the done queue and restart the HBA before unblocking I/O. */
	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		/* Tell FMA the device is unresponsive. */
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
12417 12463
12418 12464 static int
12419 12465 mptsas_init_chip(mptsas_t *mpt, int first_time)
12420 12466 {
12421 12467 ddi_dma_cookie_t cookie;
12422 12468 uint32_t i;
12423 12469 int rval;
12424 12470
12425 12471 /*
12426 12472 * Check to see if the firmware image is valid
12427 12473 */
12428 12474 if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
12429 12475 MPI2_DIAG_FLASH_BAD_SIG) {
12430 12476 mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
12431 12477 goto fail;
12432 12478 }
12433 12479
12434 12480 /*
12435 12481 * Reset the chip
12436 12482 */
12437 12483 rval = mptsas_ioc_reset(mpt, first_time);
12438 12484 if (rval == MPTSAS_RESET_FAIL) {
12439 12485 mptsas_log(mpt, CE_WARN, "hard reset failed!");
12440 12486 goto fail;
12441 12487 }
12442 12488
12443 12489 if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
12444 12490 goto mur;
12445 12491 }
12446 12492 /*
12447 12493 * Setup configuration space
12448 12494 */
12449 12495 if (mptsas_config_space_init(mpt) == FALSE) {
12450 12496 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
12451 12497 "failed!");
12452 12498 goto fail;
12453 12499 }
12454 12500
12455 12501 /*
12456 12502 * IOC facts can change after a diag reset so all buffers that are
12457 12503 * based on these numbers must be de-allocated and re-allocated. Get
12458 12504 * new IOC facts each time chip is initialized.
12459 12505 */
12460 12506 if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
12461 12507 mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
12462 12508 goto fail;
12463 12509 }
12464 12510
12465 12511 if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
12466 12512 goto fail;
12467 12513 }
12468 12514 /*
12469 12515 * Allocate request message frames, reply free queue, reply descriptor
12470 12516 * post queue, and reply message frames using latest IOC facts.
12471 12517 */
12472 12518 if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
12473 12519 mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
12474 12520 goto fail;
12475 12521 }
12476 12522 if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
12477 12523 mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
12478 12524 goto fail;
12479 12525 }
12480 12526 if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
12481 12527 mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
12482 12528 goto fail;
12483 12529 }
12484 12530 if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
12485 12531 mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
12486 12532 goto fail;
12487 12533 }
12488 12534
12489 12535 mur:
12490 12536 /*
12491 12537 * Re-Initialize ioc to operational state
12492 12538 */
12493 12539 if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
12494 12540 mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
12495 12541 goto fail;
12496 12542 }
12497 12543
12498 12544 mptsas_alloc_reply_args(mpt);
12499 12545
12500 12546 /*
12501 12547 * Initialize reply post index. Reply free index is initialized after
12502 12548 * the next loop.
12503 12549 */
12504 12550 mpt->m_post_index = 0;
12505 12551
12506 12552 /*
12507 12553 * Initialize the Reply Free Queue with the physical addresses of our
12508 12554 * reply frames.
12509 12555 */
12510 12556 cookie.dmac_address = mpt->m_reply_frame_dma_addr;
12511 12557 for (i = 0; i < mpt->m_max_replies; i++) {
12512 12558 ddi_put32(mpt->m_acc_free_queue_hdl,
12513 12559 &((uint32_t *)(void *)mpt->m_free_queue)[i],
12514 12560 cookie.dmac_address);
12515 12561 cookie.dmac_address += mpt->m_reply_frame_size;
12516 12562 }
12517 12563 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
12518 12564 DDI_DMA_SYNC_FORDEV);
12519 12565
12520 12566 /*
12521 12567 * Initialize the reply free index to one past the last frame on the
12522 12568 * queue. This will signify that the queue is empty to start with.
12523 12569 */
12524 12570 mpt->m_free_index = i;
12525 12571 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);
12526 12572
12527 12573 /*
12528 12574 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
12529 12575 */
12530 12576 for (i = 0; i < mpt->m_post_queue_depth; i++) {
12531 12577 ddi_put64(mpt->m_acc_post_queue_hdl,
12532 12578 &((uint64_t *)(void *)mpt->m_post_queue)[i],
12533 12579 0xFFFFFFFFFFFFFFFF);
12534 12580 }
12535 12581 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
12536 12582 DDI_DMA_SYNC_FORDEV);
12537 12583
12538 12584 /*
12539 12585 * Enable ports
12540 12586 */
12541 12587 if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
12542 12588 mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
12543 12589 goto fail;
12544 12590 }
12545 12591
12546 12592 /*
12547 12593 * enable events
12548 12594 */
12549 12595 if (mptsas_ioc_enable_event_notification(mpt)) {
12550 12596 goto fail;
12551 12597 }
12552 12598
12553 12599 /*
12554 12600 * We need checks in attach and these.
12555 12601 * chip_init is called in mult. places
12556 12602 */
12557 12603
12558 12604 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
12559 12605 DDI_SUCCESS) ||
12560 12606 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
12561 12607 DDI_SUCCESS) ||
12562 12608 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
12563 12609 DDI_SUCCESS) ||
12564 12610 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
12565 12611 DDI_SUCCESS) ||
12566 12612 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
12567 12613 DDI_SUCCESS)) {
12568 12614 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
12569 12615 goto fail;
12570 12616 }
12571 12617
12572 12618 /* Check all acc handles */
12573 12619 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
12574 12620 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
12575 12621 DDI_SUCCESS) ||
12576 12622 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
12577 12623 DDI_SUCCESS) ||
12578 12624 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
12579 12625 DDI_SUCCESS) ||
12580 12626 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
12581 12627 DDI_SUCCESS) ||
12582 12628 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
12583 12629 DDI_SUCCESS) ||
12584 12630 (mptsas_check_acc_handle(mpt->m_config_handle) !=
12585 12631 DDI_SUCCESS)) {
12586 12632 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
12587 12633 goto fail;
12588 12634 }
12589 12635
12590 12636 return (DDI_SUCCESS);
12591 12637
12592 12638 fail:
12593 12639 return (DDI_FAILURE);
12594 12640 }
12595 12641
/*
 * mptsas_get_pci_cap --
 *	Walk the adapter's PCI capabilities list.  Power management is the
 *	only capability acted upon: MPTSAS_OPT_PM is set in m_options and
 *	the PMCSR register offset is saved for later use.  Other known
 *	capability IDs are accepted silently; unknown IDs are only logged.
 *
 *	Returns TRUE on success (including "no capabilities present"),
 *	FALSE only when the config-space handle has not been set up.
 */
static int
mptsas_get_pci_cap(mptsas_t *mpt)
{
	ushort_t caps_ptr, cap, cap_count;

	if (mpt->m_config_handle == NULL)
		return (FALSE);
	/*
	 * Check if capabilities list is supported and if so,
	 * get initial capabilities pointer and clear bits 0,1.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
	    & PCI_STAT_CAP) {
		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
		    PCI_CONF_CAP_PTR), 4);
	} else {
		caps_ptr = PCI_CAP_NEXT_PTR_NULL;
	}

	/*
	 * Walk capabilities if supported.
	 */
	for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {

		/*
		 * Check that we haven't exceeded the maximum number of
		 * capabilities and that the pointer is in a valid range.
		 * The 48-entry ceiling and the 64-byte (config header)
		 * lower bound guard against a corrupt or looping
		 * capability chain.
		 */
		if (++cap_count > 48) {
			mptsas_log(mpt, CE_WARN,
			    "too many device capabilities.\n");
			break;
		}
		if (caps_ptr < 64) {
			mptsas_log(mpt, CE_WARN,
			    "capabilities pointer 0x%x out of range.\n",
			    caps_ptr);
			break;
		}

		/*
		 * Get next capability and check that it is valid.
		 * For now, we only support power management.
		 */
		cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
		switch (cap) {
		case PCI_CAP_ID_PM:
			mptsas_log(mpt, CE_NOTE,
			    "?mptsas%d supports power management.\n",
			    mpt->m_instance);
			mpt->m_options |= MPTSAS_OPT_PM;

			/* Save PMCSR offset */
			mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
			break;
		/*
		 * The following capabilities are valid.  Any others
		 * will cause a message to be logged.
		 */
		case PCI_CAP_ID_VPD:
		case PCI_CAP_ID_MSI:
		case PCI_CAP_ID_PCIX:
		case PCI_CAP_ID_PCI_E:
		case PCI_CAP_ID_MSI_X:
			break;
		default:
			mptsas_log(mpt, CE_NOTE,
			    "?mptsas%d unrecognized capability "
			    "0x%x.\n", mpt->m_instance, cap);
			break;
		}

		/*
		 * Get next capabilities pointer and clear bits 0,1.
		 */
		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
		    (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
	}
	return (TRUE);
}
12676 12722
12677 12723 static int
12678 12724 mptsas_init_pm(mptsas_t *mpt)
12679 12725 {
12680 12726 char pmc_name[16];
12681 12727 char *pmc[] = {
12682 12728 NULL,
12683 12729 "0=Off (PCI D3 State)",
12684 12730 "3=On (PCI D0 State)",
12685 12731 NULL
12686 12732 };
12687 12733 uint16_t pmcsr_stat;
12688 12734
12689 12735 if (mptsas_get_pci_cap(mpt) == FALSE) {
12690 12736 return (DDI_FAILURE);
12691 12737 }
12692 12738 /*
12693 12739 * If PCI's capability does not support PM, then don't need
12694 12740 * to registe the pm-components
12695 12741 */
12696 12742 if (!(mpt->m_options & MPTSAS_OPT_PM))
12697 12743 return (DDI_SUCCESS);
12698 12744 /*
12699 12745 * If power management is supported by this chip, create
12700 12746 * pm-components property for the power management framework
12701 12747 */
12702 12748 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
12703 12749 pmc[0] = pmc_name;
12704 12750 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
12705 12751 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
12706 12752 mutex_enter(&mpt->m_intr_mutex);
12707 12753 mpt->m_options &= ~MPTSAS_OPT_PM;
12708 12754 mutex_exit(&mpt->m_intr_mutex);
12709 12755 mptsas_log(mpt, CE_WARN,
12710 12756 "mptsas%d: pm-component property creation failed.",
12711 12757 mpt->m_instance);
12712 12758 return (DDI_FAILURE);
12713 12759 }
12714 12760
12715 12761 /*
12716 12762 * Power on device.
12717 12763 */
12718 12764 (void) pm_busy_component(mpt->m_dip, 0);
12719 12765 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
12720 12766 mpt->m_pmcsr_offset);
12721 12767 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
12722 12768 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
12723 12769 mpt->m_instance);
12724 12770 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
12725 12771 PCI_PMCSR_D0);
12726 12772 }
12727 12773 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
12728 12774 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
12729 12775 return (DDI_FAILURE);
12730 12776 }
12731 12777 mutex_enter(&mpt->m_intr_mutex);
12732 12778 mpt->m_power_level = PM_LEVEL_D0;
12733 12779 mutex_exit(&mpt->m_intr_mutex);
12734 12780 /*
12735 12781 * Set pm idle delay.
12736 12782 */
12737 12783 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
12738 12784 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
12739 12785
12740 12786 return (DDI_SUCCESS);
12741 12787 }
12742 12788
12743 12789 static int
12744 12790 mptsas_register_intrs(mptsas_t *mpt)
12745 12791 {
12746 12792 dev_info_t *dip;
12747 12793 int intr_types;
12748 12794
12749 12795 dip = mpt->m_dip;
12750 12796
12751 12797 /* Get supported interrupt types */
12752 12798 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
12753 12799 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
12754 12800 "failed\n");
12755 12801 return (FALSE);
12756 12802 }
12757 12803
12758 12804 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
12759 12805
12760 12806 /*
12761 12807 * Try MSI, but fall back to FIXED
12762 12808 */
12763 12809 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
12764 12810 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
12765 12811 NDBG0(("Using MSI interrupt type"));
12766 12812 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
12767 12813 return (TRUE);
12768 12814 }
12769 12815 }
12770 12816 if (intr_types & DDI_INTR_TYPE_FIXED) {
12771 12817 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
12772 12818 NDBG0(("Using FIXED interrupt type"));
12773 12819 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
12774 12820 return (TRUE);
12775 12821 } else {
12776 12822 NDBG0(("FIXED interrupt registration failed"));
12777 12823 return (FALSE);
12778 12824 }
12779 12825 }
12780 12826
12781 12827 return (FALSE);
12782 12828 }
12783 12829
/*
 * mptsas_unregister_intrs --
 *	Tear down whatever interrupts mptsas_register_intrs() set up;
 *	simply forwards to mptsas_rem_intrs().
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
12789 12835
12790 12836 /*
12791 12837 * mptsas_add_intrs:
12792 12838 *
12793 12839 * Register FIXED or MSI interrupts.
12794 12840 */
12795 12841 static int
12796 12842 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
12797 12843 {
12798 12844 dev_info_t *dip = mpt->m_dip;
12799 12845 int avail, actual, count = 0;
12800 12846 int i, flag, ret;
12801 12847
12802 12848 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12803 12849
12804 12850 /* Get number of interrupts */
12805 12851 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12806 12852 if ((ret != DDI_SUCCESS) || (count <= 0)) {
12807 12853 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12808 12854 "ret %d count %d\n", ret, count);
12809 12855
12810 12856 return (DDI_FAILURE);
12811 12857 }
12812 12858
12813 12859 /* Get number of available interrupts */
12814 12860 ret = ddi_intr_get_navail(dip, intr_type, &avail);
12815 12861 if ((ret != DDI_SUCCESS) || (avail == 0)) {
12816 12862 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12817 12863 "ret %d avail %d\n", ret, avail);
12818 12864
12819 12865 return (DDI_FAILURE);
12820 12866 }
12821 12867
12822 12868 if (avail < count) {
12823 12869 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
12824 12870 "navail() returned %d", count, avail);
12825 12871 }
12826 12872
12827 12873 /* Mpt only have one interrupt routine */
12828 12874 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12829 12875 count = 1;
12830 12876 }
12831 12877
12832 12878 /* Allocate an array of interrupt handles */
12833 12879 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12834 12880 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12835 12881
12836 12882 flag = DDI_INTR_ALLOC_NORMAL;
12837 12883
12838 12884 /* call ddi_intr_alloc() */
12839 12885 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12840 12886 count, &actual, flag);
12841 12887
12842 12888 if ((ret != DDI_SUCCESS) || (actual == 0)) {
12843 12889 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
12844 12890 ret);
12845 12891 kmem_free(mpt->m_htable, mpt->m_intr_size);
12846 12892 return (DDI_FAILURE);
12847 12893 }
12848 12894
12849 12895 /* use interrupt count returned or abort? */
12850 12896 if (actual < count) {
12851 12897 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
12852 12898 count, actual);
12853 12899 }
12854 12900
12855 12901 mpt->m_intr_cnt = actual;
12856 12902
12857 12903 /*
12858 12904 * Get priority for first msi, assume remaining are all the same
12859 12905 */
12860 12906 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
12861 12907 &mpt->m_intr_pri)) != DDI_SUCCESS) {
12862 12908 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
12863 12909
12864 12910 /* Free already allocated intr */
12865 12911 for (i = 0; i < actual; i++) {
12866 12912 (void) ddi_intr_free(mpt->m_htable[i]);
12867 12913 }
12868 12914
12869 12915 kmem_free(mpt->m_htable, mpt->m_intr_size);
12870 12916 return (DDI_FAILURE);
12871 12917 }
12872 12918
12873 12919 /* Test for high level mutex */
12874 12920 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
12875 12921 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
12876 12922 "Hi level interrupt not supported\n");
12877 12923
12878 12924 /* Free already allocated intr */
12879 12925 for (i = 0; i < actual; i++) {
12880 12926 (void) ddi_intr_free(mpt->m_htable[i]);
12881 12927 }
12882 12928
12883 12929 kmem_free(mpt->m_htable, mpt->m_intr_size);
12884 12930 return (DDI_FAILURE);
12885 12931 }
12886 12932
12887 12933 /* Call ddi_intr_add_handler() */
12888 12934 for (i = 0; i < actual; i++) {
12889 12935 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
12890 12936 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
12891 12937 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
12892 12938 "failed %d\n", ret);
12893 12939
12894 12940 /* Free already allocated intr */
12895 12941 for (i = 0; i < actual; i++) {
12896 12942 (void) ddi_intr_free(mpt->m_htable[i]);
12897 12943 }
12898 12944
12899 12945 kmem_free(mpt->m_htable, mpt->m_intr_size);
12900 12946 return (DDI_FAILURE);
12901 12947 }
12902 12948 }
12903 12949
12904 12950 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
12905 12951 != DDI_SUCCESS) {
12906 12952 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
12907 12953
12908 12954 /* Free already allocated intr */
12909 12955 for (i = 0; i < actual; i++) {
12910 12956 (void) ddi_intr_free(mpt->m_htable[i]);
12911 12957 }
12912 12958
12913 12959 kmem_free(mpt->m_htable, mpt->m_intr_size);
12914 12960 return (DDI_FAILURE);
12915 12961 }
12916 12962
12917 12963 /*
12918 12964 * Enable interrupts
12919 12965 */
12920 12966 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12921 12967 /* Call ddi_intr_block_enable() for MSI interrupts */
12922 12968 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
12923 12969 } else {
12924 12970 /* Call ddi_intr_enable for MSI or FIXED interrupts */
12925 12971 for (i = 0; i < mpt->m_intr_cnt; i++) {
12926 12972 (void) ddi_intr_enable(mpt->m_htable[i]);
12927 12973 }
12928 12974 }
12929 12975 return (DDI_SUCCESS);
12930 12976 }
12931 12977
/*
 * mptsas_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts.
 *
 * Teardown order mirrors mptsas_add_intrs(): first disable all vectors
 * (block-disable when the capability allows it), then remove each
 * handler and free its vector, and finally free the handle array.
 */
static void
mptsas_rem_intrs(mptsas_t *mpt)
{
	int	i;

	NDBG6(("mptsas_rem_intrs"));

	/* Disable all interrupts */
	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
	} else {
		for (i = 0; i < mpt->m_intr_cnt; i++) {
			(void) ddi_intr_disable(mpt->m_htable[i]);
		}
	}

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < mpt->m_intr_cnt; i++) {
		(void) ddi_intr_remove_handler(mpt->m_htable[i]);
		(void) ddi_intr_free(mpt->m_htable[i]);
	}

	kmem_free(mpt->m_htable, mpt->m_intr_size);
}
12962 13008
/*
 * The IO fault service error handling callback function
 *
 * Registered with ddi_fm_handler_register() in mptsas_fm_init().
 * Posts the PCI ereport for the error and passes the framework's
 * assessment (fme_status) straight back to the caller.
 */
/*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
12977 13023
/*
 * mptsas_fm_init - initialize fma capabilities and register with IO
 * fault services.
 *
 * No-op unless m_fm_capabilities was set (e.g. from a driver property)
 * before this is called.  Adjusts the register-access and DMA
 * attributes for FMA, then registers ereport support and the error
 * callback according to the capabilities actually granted.
 */
static void
mptsas_fm_init(mptsas_t *mpt)
{
	/*
	 * Need to change iblock to priority for new MSI intr
	 */
	ddi_iblock_cookie_t	fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (mpt->m_fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 * mpt->m_fm_capabilities will be updated to indicate
		 * capabilities actually supported (not requested.)
		 */
		ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */
		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			pci_ereport_setup(mpt->m_dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			ddi_fm_handler_register(mpt->m_dip,
			    mptsas_fm_error_cb, (void *) mpt);
		}
	}
}
13022 13068
/*
 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
 * fault services.
 *
 * Exact inverse of mptsas_fm_init(): unregisters the error callback,
 * tears down ereport support, unregisters from the FM framework and
 * restores the default (non-FLAGERR) access/DMA attributes.
 */
static void
mptsas_fm_fini(mptsas_t *mpt)
{
	/* Only unregister FMA capabilities if registered */
	if (mpt->m_fm_capabilities) {

		/*
		 * Un-register error callback if error callback capable.
		 */

		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			ddi_fm_handler_unregister(mpt->m_dip);
		}

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */

		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			pci_ereport_teardown(mpt->m_dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(mpt->m_dip);

		/* Adjust access and dma attributes for FMA */
		mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;

	}
}
13061 13107
13062 13108 int
13063 13109 mptsas_check_acc_handle(ddi_acc_handle_t handle)
13064 13110 {
13065 13111 ddi_fm_error_t de;
13066 13112
13067 13113 if (handle == NULL)
13068 13114 return (DDI_FAILURE);
13069 13115 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
13070 13116 return (de.fme_status);
13071 13117 }
13072 13118
13073 13119 int
13074 13120 mptsas_check_dma_handle(ddi_dma_handle_t handle)
13075 13121 {
13076 13122 ddi_fm_error_t de;
13077 13123
13078 13124 if (handle == NULL)
13079 13125 return (DDI_FAILURE);
13080 13126 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
13081 13127 return (de.fme_status);
13082 13128 }
13083 13129
13084 13130 void
13085 13131 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
13086 13132 {
13087 13133 uint64_t ena;
13088 13134 char buf[FM_MAX_CLASS];
13089 13135
13090 13136 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
13091 13137 ena = fm_ena_generate(0, FM_ENA_FMT1);
13092 13138 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
13093 13139 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
13094 13140 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
13095 13141 }
13096 13142 }
13097 13143
13098 13144 static int
13099 13145 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13100 13146 uint16_t *dev_handle, mptsas_target_t **pptgt)
13101 13147 {
13102 13148 int rval;
13103 13149 uint32_t dev_info;
13104 13150 uint64_t sas_wwn;
13105 13151 mptsas_phymask_t phymask;
13106 13152 uint8_t physport, phynum, config, disk;
13107 13153 mptsas_slots_t *slots = mpt->m_active;
13108 13154 uint64_t devicename;
13109 13155 uint16_t pdev_hdl;
13110 13156 mptsas_target_t *tmp_tgt = NULL;
13111 13157 uint16_t bay_num, enclosure;
13112 13158
13113 13159 ASSERT(*pptgt == NULL);
13114 13160
13115 13161 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13116 13162 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13117 13163 &bay_num, &enclosure);
13118 13164 if (rval != DDI_SUCCESS) {
13119 13165 rval = DEV_INFO_FAIL_PAGE0;
13120 13166 return (rval);
13121 13167 }
13122 13168
13123 13169 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
13124 13170 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13125 13171 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
13126 13172 rval = DEV_INFO_WRONG_DEVICE_TYPE;
13127 13173 return (rval);
13128 13174 }
13129 13175
13130 13176 /*
13131 13177 * Check if the dev handle is for a Phys Disk. If so, set return value
13132 13178 * and exit. Don't add Phys Disks to hash.
13133 13179 */
13134 13180 for (config = 0; config < slots->m_num_raid_configs; config++) {
13135 13181 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
13136 13182 if (*dev_handle == slots->m_raidconfig[config].
13137 13183 m_physdisk_devhdl[disk]) {
13138 13184 rval = DEV_INFO_PHYS_DISK;
13139 13185 return (rval);
13140 13186 }
13141 13187 }
13142 13188 }
13143 13189
13144 13190 /*
13145 13191 * Get SATA Device Name from SAS device page0 for
13146 13192 * sata device, if device name doesn't exist, set m_sas_wwn to
13147 13193 * 0 for direct attached SATA. For the device behind the expander
13148 13194 * we still can use STP address assigned by expander.
13149 13195 */
13150 13196 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13151 13197 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13152 13198 mutex_exit(&mpt->m_mutex);
13153 13199 /* alloc a tmp_tgt to send the cmd */
13154 13200 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
13155 13201 KM_SLEEP);
13156 13202 tmp_tgt->m_devhdl = *dev_handle;
13157 13203 tmp_tgt->m_deviceinfo = dev_info;
13158 13204 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
13159 13205 tmp_tgt->m_qfull_retry_interval =
13160 13206 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
13161 13207 tmp_tgt->m_t_throttle = MAX_THROTTLE;
13162 13208 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13163 13209 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
13164 13210 mutex_enter(&mpt->m_mutex);
13165 13211 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13166 13212 sas_wwn = devicename;
13167 13213 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13168 13214 sas_wwn = 0;
13169 13215 }
13170 13216 }
13171 13217
13172 13218 phymask = mptsas_physport_to_phymask(mpt, physport);
13173 13219 *pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
13174 13220 dev_info, phymask, phynum, mpt);
13175 13221 if (*pptgt == NULL) {
13176 13222 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
13177 13223 "structure!");
13178 13224 rval = DEV_INFO_FAIL_ALLOC;
13179 13225 return (rval);
13180 13226 }
13181 13227 (*pptgt)->m_enclosure = enclosure;
13182 13228 (*pptgt)->m_slot_num = bay_num;
13183 13229 return (DEV_INFO_SUCCESS);
13184 13230 }
13185 13231
/*
 * mptsas_get_sata_guid --
 *	Derive a GUID for a SATA device by reading SCSI INQUIRY VPD page
 *	0x83 (device identification) and extracting the first designator
 *	when it is a LUN-associated NAA 5 world-wide name.
 *
 *	Retries the inquiry up to 3 times (1 second apart) when the data
 *	does not yet look valid.  Returns the big-endian-decoded WWN, or
 *	0 when no NAA-format GUID could be obtained.
 */
uint64_t
mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
{
	uint64_t	sata_guid = 0, *pwwn = NULL;
	int		target = ptgt->m_devhdl;
	uchar_t		*inq83 = NULL;
	int		inq83_len = 0xFF;
	uchar_t		*dblk = NULL;
	int		inq83_retry = 3;
	int		rval = DDI_FAILURE;

	inq83	= kmem_zalloc(inq83_len, KM_SLEEP);

inq83_retry:
	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
	    inq83_len, NULL, 1);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
		    "0x83 for target:%x, lun:%x failed!", target, lun);
		goto out;
	}
	/* According to SAT2, the first descriptor is logic unit name */
	dblk = &inq83[4];
	/* association must be "logical unit" (bits 5:4 of byte 1 == 0) */
	if ((dblk[1] & 0x30) != 0) {
		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
		goto out;
	}
	pwwn = (uint64_t *)(void *)(&dblk[4]);
	/* high nibble 5 means an NAA IEEE-registered identifier */
	if ((dblk[4] & 0xf0) == 0x50) {
		sata_guid = BE_64(*pwwn);
		goto out;
	} else if (dblk[4] == 'A') {
		NDBG20(("SATA drive has no NAA format GUID."));
		goto out;
	} else {
		/* The data is not ready, wait and retry */
		inq83_retry--;
		if (inq83_retry <= 0) {
			goto out;
		}
		NDBG20(("The GUID is not ready, retry..."));
		delay(1 * drv_usectohz(1000000));
		goto inq83_retry;
	}
out:
	kmem_free(inq83, inq83_len);
	return (sata_guid);
}
13234 13280
/*
 * mptsas_inquiry --
 *	Issue a SCSI INQUIRY (optionally an EVPD page) to the given
 *	target/lun via the internal command path and copy up to len
 *	bytes of the response into buf.  When reallen is non-NULL it is
 *	set to the number of bytes actually transferred.
 *
 *	Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
    unsigned char *buf, int len, int *reallen, uchar_t evpd)
{
	uchar_t			cdb[CDB_GROUP0];
	struct scsi_address	ap;
	struct buf		*data_bp = NULL;
	int			resid = 0;
	int			ret = DDI_FAILURE;

	/* allocation length occupies two CDB bytes */
	ASSERT(len <= 0xffff);

	ap.a_target = MPTSAS_INVALID_DEVHDL;
	ap.a_lun = (uchar_t)(lun);
	ap.a_hba_tran = mpt->m_tran;

	data_bp = scsi_alloc_consistent_buf(&ap,
	    (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
	if (data_bp == NULL) {
		return (ret);
	}
	bzero(cdb, CDB_GROUP0);
	cdb[0] = SCMD_INQUIRY;
	cdb[1] = evpd;
	cdb[2] = page;
	cdb[3] = (len & 0xff00) >> 8;
	cdb[4] = (len & 0x00ff);
	cdb[5] = 0;

	ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
	    &resid);
	if (ret == DDI_SUCCESS) {
		if (reallen) {
			*reallen = len - resid;
		}
		bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
	}
	if (data_bp) {
		scsi_free_consistent_buf(data_bp);
	}
	return (ret);
}
13277 13323
/*
 * mptsas_send_scsi_cmd --
 *	Synchronously issue an internally generated SCSI command (CDB of
 *	cdblen bytes, optional data buffer) to the given target and poll
 *	for completion.  When resid is non-NULL it receives pkt_resid.
 *
 *	Returns DDI_SUCCESS only when the packet completed via
 *	scsi_poll() without a check condition; DDI_FAILURE otherwise.
 */
static int
mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
    mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
    int *resid)
{
	struct scsi_pkt		*pktp = NULL;
	scsi_hba_tran_t		*tran_clone = NULL;
	mptsas_tgt_private_t	*tgt_private = NULL;
	int			ret = DDI_FAILURE;

	/*
	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
	 * to simulate the cmds from sd
	 */
	tran_clone = kmem_alloc(
	    sizeof (scsi_hba_tran_t), KM_SLEEP);
	/*
	 * NOTE(review): KM_SLEEP allocations are documented not to return
	 * NULL; these NULL checks are defensive only.
	 */
	if (tran_clone == NULL) {
		goto out;
	}
	bcopy((caddr_t)mpt->m_tran,
	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
	tgt_private = kmem_alloc(
	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
	if (tgt_private == NULL) {
		goto out;
	}
	tgt_private->t_lun = ap->a_lun;
	tgt_private->t_private = ptgt;
	tran_clone->tran_tgt_private = tgt_private;
	ap->a_hba_tran = tran_clone;

	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
	    data_bp, cdblen, sizeof (struct scsi_arq_status),
	    0, PKT_CONSISTENT, NULL, NULL);
	if (pktp == NULL) {
		goto out;
	}
	bcopy(cdb, pktp->pkt_cdbp, cdblen);
	pktp->pkt_flags = FLAG_NOPARITY;
	if (scsi_poll(pktp) < 0) {
		goto out;
	}
	/* treat a check condition as failure */
	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
		goto out;
	}
	if (resid != NULL) {
		*resid = pktp->pkt_resid;
	}

	ret = DDI_SUCCESS;
out:
	/* common cleanup: release packet and temporary tran/private copies */
	if (pktp) {
		scsi_destroy_pkt(pktp);
	}
	if (tran_clone) {
		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
	}
	if (tgt_private) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
	}
	return (ret);
}
/*
 * mptsas_parse_address --
 *	Parse a target unit-address of the form "w<WWID>,<LUN>" or
 *	"p<PHYID>,<LUN>", where WWID, PHYID and LUN are hexadecimal.
 *	For the "w" form *wwid is set; for the "p" form *phy is set
 *	(*wwid and *phy are left untouched otherwise).  *lun is always
 *	set on success.
 *
 *	Returns DDI_SUCCESS, or DDI_FAILURE on any parse error.
 */
static int
mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
{
	char	*cp = NULL;
	char	*ptr = NULL;
	size_t	s = 0;
	char	*wwid_str = NULL;
	char	*lun_str = NULL;
	long	lunnum;
	long	phyid = -1;
	int	rc = DDI_FAILURE;

	ptr = name;
	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
	ptr++;
	if ((cp = strchr(ptr, ',')) == NULL) {
		return (DDI_FAILURE);
	}

	/* copy out the WWID/PHY portion (text before the comma) */
	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, wwid_str, s);
	wwid_str[s] = '\0';

	ptr = ++cp;

	/*
	 * strchr() for '\0' returns a pointer to the terminator and can
	 * never be NULL; this check is purely defensive.
	 */
	if ((cp = strchr(ptr, '\0')) == NULL) {
		goto out;
	}
	lun_str =  kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	s = (uintptr_t)cp - (uintptr_t)ptr;

	bcopy(ptr, lun_str, s);
	lun_str[s] = '\0';

	if (name[0] == 'p') {
		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
	} else {
		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
	}
	if (rc != DDI_SUCCESS)
		goto out;

	if (phyid != -1) {
		ASSERT(phyid < MPTSAS_MAX_PHYS);
		*phy = (uint8_t)phyid;
	}
	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
	if (rc != 0)
		goto out;

	*lun = (int)lunnum;
	rc = DDI_SUCCESS;
out:
	if (wwid_str)
		kmem_free(wwid_str, SCSI_MAXNAMELEN);
	if (lun_str)
		kmem_free(lun_str, SCSI_MAXNAMELEN);

	return (rc);
}
13403 13449
13404 13450 /*
13405 13451 * mptsas_parse_smp_name() is to parse sas wwn string
13406 13452 * which format is "wWWN"
13407 13453 */
13408 13454 static int
13409 13455 mptsas_parse_smp_name(char *name, uint64_t *wwn)
13410 13456 {
13411 13457 char *ptr = name;
13412 13458
13413 13459 if (*ptr != 'w') {
13414 13460 return (DDI_FAILURE);
13415 13461 }
13416 13462
13417 13463 ptr++;
13418 13464 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
13419 13465 return (DDI_FAILURE);
13420 13466 }
13421 13467 return (DDI_SUCCESS);
13422 13468 }
13423 13469
/*
 * Bus-config entry point for the iport node.  Enumerates either one
 * specific child (BUS_CONFIG_ONE, addressed as "smp@wWWN", "...@wWWN,lun"
 * or "...@pPHY,lun") or everything on the iport (BUS_CONFIG_DRIVER /
 * BUS_CONFIG_ALL), then lets the NDI framework finish the operation.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int		ret = NDI_FAILURE;
	int		circ = 0;
	int		circ1 = 0;
	mptsas_t	*mpt;
	char		*ptr = NULL;
	char		*devnm = NULL;
	uint64_t	wwid = 0;
	uint8_t		phy = 0xFF;
	int		lun = 0;
	uint_t		mflags = flag;
	int		bconfig = TRUE;

	/* Only iport nodes (those with a unit address) are configured here. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	if ((ret == NDI_SUCCESS) && bconfig) {
		/*
		 * Let the framework complete the config, using the
		 * (possibly LUN-normalized) device name if one was built.
		 */
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
13541 13587
13542 13588 static int
13543 13589 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
13544 13590 mptsas_target_t *ptgt)
13545 13591 {
13546 13592 int rval = DDI_FAILURE;
13547 13593 struct scsi_inquiry *sd_inq = NULL;
13548 13594 mptsas_t *mpt = DIP2MPT(pdip);
13549 13595
13550 13596 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13551 13597
13552 13598 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
13553 13599 SUN_INQSIZE, 0, (uchar_t)0);
13554 13600
13555 13601 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13556 13602 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
13557 13603 } else {
13558 13604 rval = DDI_FAILURE;
13559 13605 }
13560 13606
13561 13607 kmem_free(sd_inq, SUN_INQSIZE);
13562 13608 return (rval);
13563 13609 }
13564 13610
/*
 * Configure a single LUN addressed by SAS address ("wWWN,LUN" form).
 * Looks up the target on this iport's physical port; if a matching
 * child already exists it is returned directly, otherwise the LUN is
 * probed and created.  A phymask of 0 denotes the virtual (RAID)
 * iport, in which case the target is configured as an IR volume.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int		rval;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		if (sasaddr != ptgt->m_sas_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
13625 13671
13626 13672 static int
13627 13673 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
13628 13674 dev_info_t **lundip)
13629 13675 {
13630 13676 int rval;
13631 13677 mptsas_t *mpt = DIP2MPT(pdip);
13632 13678 int phymask;
13633 13679 mptsas_target_t *ptgt = NULL;
13634 13680
13635 13681 /*
13636 13682 * Get the physical port associated to the iport
13637 13683 */
13638 13684 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13639 13685 "phymask", 0);
13640 13686
13641 13687 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
13642 13688 if (ptgt == NULL) {
13643 13689 /*
13644 13690 * didn't match any device by searching
13645 13691 */
13646 13692 return (DDI_FAILURE);
13647 13693 }
13648 13694
13649 13695 /*
13650 13696 * If the LUN already exists and the status is online,
13651 13697 * we just return the pointer to dev_info_t directly.
13652 13698 * For the mdi_pathinfo node, we'll handle it in
13653 13699 * mptsas_create_virt_lun().
13654 13700 */
13655 13701
13656 13702 *lundip = mptsas_find_child_phy(pdip, phy);
13657 13703 if (*lundip != NULL) {
13658 13704 return (DDI_SUCCESS);
13659 13705 }
13660 13706
13661 13707 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13662 13708
13663 13709 return (rval);
13664 13710 }
13665 13711
13666 13712 static int
13667 13713 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
13668 13714 uint8_t *lun_addr_type)
13669 13715 {
13670 13716 uint32_t lun_idx = 0;
13671 13717
13672 13718 ASSERT(lun_num != NULL);
13673 13719 ASSERT(lun_addr_type != NULL);
13674 13720
13675 13721 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13676 13722 /* determine report luns addressing type */
13677 13723 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
13678 13724 /*
13679 13725 * Vendors in the field have been found to be concatenating
13680 13726 * bus/target/lun to equal the complete lun value instead
13681 13727 * of switching to flat space addressing
13682 13728 */
13683 13729 /* 00b - peripheral device addressing method */
13684 13730 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
13685 13731 /* FALLTHRU */
13686 13732 /* 10b - logical unit addressing method */
13687 13733 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
13688 13734 /* FALLTHRU */
13689 13735 /* 01b - flat space addressing method */
13690 13736 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
13691 13737 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
13692 13738 *lun_addr_type = (buf[lun_idx] &
13693 13739 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
13694 13740 *lun_num = (buf[lun_idx] & 0x3F) << 8;
13695 13741 *lun_num |= buf[lun_idx + 1];
13696 13742 return (DDI_SUCCESS);
13697 13743 default:
13698 13744 return (DDI_FAILURE);
13699 13745 }
13700 13746 }
13701 13747
/*
 * Enumerate and configure all LUNs of a WWN-addressed target: issue
 * SCMD_REPORT_LUNS, probe each reported LUN, then offline children for
 * LUNs the target no longer reports.  SATA/ATAPI devices are single-LUN
 * only; for those the function merely verifies the existing child.
 */
static int
mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
{
	struct buf		*repluns_bp = NULL;
	struct scsi_address	ap;
	uchar_t			cdb[CDB_GROUP5];
	int			ret = DDI_FAILURE;
	int			retry = 0;
	int			lun_list_len = 0;
	uint16_t		lun_num = 0;
	uint8_t			lun_addr_type = 0;
	uint32_t		lun_cnt = 0;
	uint32_t		lun_total = 0;
	dev_info_t		*cdip = NULL;
	uint16_t		*saved_repluns = NULL;
	char			*buffer = NULL;
	int			buf_len = 128;
	mptsas_t		*mpt = DIP2MPT(pdip);
	uint64_t		sas_wwn = 0;
	uint8_t			phy = 0xFF;
	uint32_t		dev_info = 0;

	/* Snapshot the target's identity under the driver mutex. */
	mutex_enter(&mpt->m_mutex);
	sas_wwn = ptgt->m_sas_wwn;
	phy = ptgt->m_phynum;
	dev_info = ptgt->m_deviceinfo;
	mutex_exit(&mpt->m_mutex);

	if (sas_wwn == 0) {
		/*
		 * It's a SATA without Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_phy(pdip, phy)) {
			return (DDI_SUCCESS);
		} else {
			/*
			 * need configure and create node
			 */
			return (DDI_FAILURE);
		}
	}

	/*
	 * WWN (SAS address or Device Name exist)
	 */
	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		/*
		 * SATA device with Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
			return (DDI_SUCCESS);
		} else {
			return (DDI_FAILURE);
		}
	}

	/*
	 * Issue REPORT LUNS, retrying on failure (up to 3 times) and
	 * growing the buffer until the full LUN list fits.
	 */
	do {
		ap.a_target = MPTSAS_INVALID_DEVHDL;
		ap.a_lun = 0;
		ap.a_hba_tran = mpt->m_tran;
		repluns_bp = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
		if (repluns_bp == NULL) {
			retry++;
			continue;
		}
		bzero(cdb, CDB_GROUP5);
		cdb[0] = SCMD_REPORT_LUNS;
		/* Allocation length goes in CDB bytes 6-9, big-endian. */
		cdb[6] = (buf_len & 0xff000000) >> 24;
		cdb[7] = (buf_len & 0x00ff0000) >> 16;
		cdb[8] = (buf_len & 0x0000ff00) >> 8;
		cdb[9] = (buf_len & 0x000000ff);

		ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
		    repluns_bp, NULL);
		if (ret != DDI_SUCCESS) {
			scsi_free_consistent_buf(repluns_bp);
			retry++;
			continue;
		}
		/* First 4 bytes of the response: LUN list length (BE). */
		lun_list_len = BE_32(*(int *)((void *)(
		    repluns_bp->b_un.b_addr)));
		if (buf_len >= lun_list_len + 8) {
			ret = DDI_SUCCESS;
			break;
		}
		/* Buffer too small: resize to list length + 8-byte header. */
		scsi_free_consistent_buf(repluns_bp);
		buf_len = lun_list_len + 8;

	} while (retry < 3);

	if (ret != DDI_SUCCESS)
		return (ret);
	buffer = (char *)repluns_bp->b_un.b_addr;
	/*
	 * find out the number of luns returned by the SCSI ReportLun call
	 * and allocate buffer space
	 */
	lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
	saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
	if (saved_repluns == NULL) {
		scsi_free_consistent_buf(repluns_bp);
		return (DDI_FAILURE);
	}
	for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
		if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
		    &lun_num, &lun_addr_type) != DDI_SUCCESS) {
			continue;
		}
		saved_repluns[lun_cnt] = lun_num;
		/* Reuse an existing child when present, else probe the LUN. */
		if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
			ret = DDI_SUCCESS;
		else
			ret = mptsas_probe_lun(pdip, lun_num, &cdip,
			    ptgt);
		if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
			/* The LUN is back; remove any stale "gone" marker. */
			(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
			    MPTSAS_DEV_GONE);
		}
	}
	/* Offline children for LUNs the target no longer reports. */
	mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
	kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
	scsi_free_consistent_buf(repluns_bp);
	return (DDI_SUCCESS);
}
13830 13876
13831 13877 static int
13832 13878 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
13833 13879 {
13834 13880 int rval = DDI_FAILURE;
13835 13881 struct scsi_inquiry *sd_inq = NULL;
13836 13882 mptsas_t *mpt = DIP2MPT(pdip);
13837 13883 mptsas_target_t *ptgt = NULL;
13838 13884
13839 13885 mutex_enter(&mpt->m_mutex);
13840 13886 ptgt = mptsas_search_by_devhdl(&mpt->m_active->m_tgttbl, target);
13841 13887 mutex_exit(&mpt->m_mutex);
13842 13888 if (ptgt == NULL) {
13843 13889 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
13844 13890 "not found.", target);
13845 13891 return (rval);
13846 13892 }
13847 13893
13848 13894 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13849 13895 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
13850 13896 SUN_INQSIZE, 0, (uchar_t)0);
13851 13897
13852 13898 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13853 13899 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
13854 13900 0);
13855 13901 } else {
13856 13902 rval = DDI_FAILURE;
13857 13903 }
13858 13904
13859 13905 kmem_free(sd_inq, SUN_INQSIZE);
13860 13906 return (rval);
13861 13907 }
13862 13908
13863 13909 /*
13864 13910 * configure all RAID volumes for virtual iport
13865 13911 */
13866 13912 static void
13867 13913 mptsas_config_all_viport(dev_info_t *pdip)
13868 13914 {
13869 13915 mptsas_t *mpt = DIP2MPT(pdip);
13870 13916 int config, vol;
13871 13917 int target;
13872 13918 dev_info_t *lundip = NULL;
13873 13919 mptsas_slots_t *slots = mpt->m_active;
13874 13920
13875 13921 /*
13876 13922 * Get latest RAID info and search for any Volume DevHandles. If any
13877 13923 * are found, configure the volume.
13878 13924 */
13879 13925 mutex_enter(&mpt->m_mutex);
13880 13926 for (config = 0; config < slots->m_num_raid_configs; config++) {
13881 13927 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
13882 13928 if (slots->m_raidconfig[config].m_raidvol[vol].m_israid
13883 13929 == 1) {
13884 13930 target = slots->m_raidconfig[config].
13885 13931 m_raidvol[vol].m_raidhandle;
13886 13932 mutex_exit(&mpt->m_mutex);
13887 13933 (void) mptsas_config_raid(pdip, target,
13888 13934 &lundip);
13889 13935 mutex_enter(&mpt->m_mutex);
13890 13936 }
13891 13937 }
13892 13938 }
13893 13939 mutex_exit(&mpt->m_mutex);
13894 13940 }
13895 13941
/*
 * Offline child nodes and mdi paths belonging to the target whose LUNs
 * are no longer reported.  repluns holds the lun_cnt LUN numbers from
 * the most recent REPORT LUNS response; any child or path of this
 * target addressing a LUN absent from that list is offlined with
 * NDI_DEVI_REMOVE.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t	*child = NULL, *savechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	uint64_t	sas_wwn, wwid;
	uint8_t		phy;
	int		lun;
	int		i;
	int		find;
	char		*addr;
	char		*nodename;
	mptsas_t	*mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_sas_wwn;
	mutex_exit(&mpt->m_mutex);

	/* First pass: regular (non-MPxIO) child devinfo nodes. */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		/* Advance before possibly offlining the current node. */
		savechild = child;
		child = ddi_get_next_sibling(child);

		nodename = ddi_node_name(savechild);
		/* SMP nodes are not LUN-addressed; leave them alone. */
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/* Second pass: MPxIO pathinfo nodes for this target. */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
13992 14038
/*
 * Re-walk the IOC's expander and device lists and refresh the driver's
 * hash tables.  Traversal resumes from m_smp_devhdl / m_dev_handle via
 * the GET_NEXT_HANDLE page-address forms, continuing until the
 * traversal-done flags are set elsewhere or a page read fails.
 */
void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t	page_address;
	int		rval = 0;
	uint16_t	dev_handle;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	/* Walk SAS expanders via expander page 0 GetNextHandle. */
	dev_handle = mpt->m_smp_devhdl;
	for (; mpt->m_done_traverse_smp == 0; ) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		/* Remember progress so traversal can resume here. */
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Do loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	for (; mpt->m_done_traverse_dev == 0; ) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}

		mpt->m_dev_handle = dev_handle;
	}

}
14046 14092
14047 14093 void
14048 14094 mptsas_invalid_hashtab(mptsas_hash_table_t *hashtab)
14049 14095 {
14050 14096 mptsas_hash_data_t *data;
14051 14097 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
14052 14098 while (data != NULL) {
14053 14099 data->devhdl = MPTSAS_INVALID_DEVHDL;
14054 14100 data->device_info = 0;
14055 14101 /*
14056 14102 * For tgttbl, clear dr_flag.
14057 14103 */
14058 14104 data->dr_flag = MPTSAS_DR_INACTIVE;
14059 14105 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
14060 14106 }
14061 14107 }
14062 14108
/*
 * Rebuild driver state after a hard reset: refresh the port/phymask
 * mapping, invalidate every target and SMP hash entry, and restart
 * device/expander traversal from scratch.
 */
void
mptsas_update_driver_data(struct mptsas *mpt)
{
	/*
	 * TODO after hard reset, update the driver data structures
	 * 1. update port/phymask mapping table mpt->m_phy_info
	 * 2. invalid all the entries in hash table
	 *    m_devhdl = 0xffff and m_deviceinfo = 0
	 * 3. call sas_device_page/expander_page to update hash table
	 */
	mptsas_update_phymask(mpt);
	/*
	 * Invalid the existing entries
	 */
	mptsas_invalid_hashtab(&mpt->m_active->m_tgttbl);
	mptsas_invalid_hashtab(&mpt->m_active->m_smptbl);
	/* Force both traversals to restart from an invalid handle. */
	mpt->m_done_traverse_dev = 0;
	mpt->m_done_traverse_smp = 0;
	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
	mptsas_update_hashtab(mpt);
}
14084 14130
/*
 * Configure every SMP expander and target visible through the given
 * iport.  The virtual iport (phymask == 0) carries only RAID volumes
 * and is delegated to mptsas_config_all_viport().
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t	*smpdip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* Make sure device and expander discovery has completed. */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/* Online each SMP expander belonging to this iport's phymask. */
	psmp = (mptsas_smp_t *)mptsas_hash_traverse(&mpt->m_active->m_smptbl,
	    MPTSAS_HASH_FIRST);
	while (psmp != NULL) {
		phy_mask = psmp->m_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			/* Drop the mutex across the blocking online call. */
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
		psmp = (mptsas_smp_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_smptbl, MPTSAS_HASH_NEXT);
	}

	/* Configure each target belonging to this iport's phymask. */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		phy_mask = ptgt->m_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
	mutex_exit(&mpt->m_mutex);
}
14144 14190
14145 14191 static int
14146 14192 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
14147 14193 {
14148 14194 int rval = DDI_FAILURE;
14149 14195 dev_info_t *tdip;
14150 14196
14151 14197 rval = mptsas_config_luns(pdip, ptgt);
14152 14198 if (rval != DDI_SUCCESS) {
14153 14199 /*
14154 14200 * The return value means the SCMD_REPORT_LUNS
14155 14201 * did not execute successfully. The target maybe
14156 14202 * doesn't support such command.
14157 14203 */
14158 14204 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
14159 14205 }
14160 14206 return (rval);
14161 14207 }
14162 14208
14163 14209 /*
14164 14210 * Return fail if not all the childs/paths are freed.
14165 14211 * if there is any path under the HBA, the return value will be always fail
14166 14212 * because we didn't call mdi_pi_free for path
14167 14213 */
14168 14214 static int
14169 14215 mptsas_offline_target(dev_info_t *pdip, char *name)
14170 14216 {
14171 14217 dev_info_t *child = NULL, *prechild = NULL;
14172 14218 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
14173 14219 int tmp_rval, rval = DDI_SUCCESS;
14174 14220 char *addr, *cp;
14175 14221 size_t s;
14176 14222 mptsas_t *mpt = DIP2MPT(pdip);
14177 14223
14178 14224 child = ddi_get_child(pdip);
14179 14225 while (child) {
14180 14226 addr = ddi_get_name_addr(child);
14181 14227 prechild = child;
14182 14228 child = ddi_get_next_sibling(child);
14183 14229
14184 14230 if (addr == NULL) {
14185 14231 continue;
14186 14232 }
14187 14233 if ((cp = strchr(addr, ',')) == NULL) {
14188 14234 continue;
14189 14235 }
14190 14236
14191 14237 s = (uintptr_t)cp - (uintptr_t)addr;
14192 14238
14193 14239 if (strncmp(addr, name, s) != 0) {
14194 14240 continue;
14195 14241 }
14196 14242
14197 14243 tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
14198 14244 NDI_DEVI_REMOVE);
14199 14245 if (tmp_rval != DDI_SUCCESS) {
14200 14246 rval = DDI_FAILURE;
14201 14247 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14202 14248 prechild, MPTSAS_DEV_GONE) !=
14203 14249 DDI_PROP_SUCCESS) {
14204 14250 mptsas_log(mpt, CE_WARN, "mptsas driver "
14205 14251 "unable to create property for "
14206 14252 "SAS %s (MPTSAS_DEV_GONE)", addr);
14207 14253 }
14208 14254 }
14209 14255 }
14210 14256
14211 14257 pip = mdi_get_next_client_path(pdip, NULL);
14212 14258 while (pip) {
14213 14259 addr = MDI_PI(pip)->pi_addr;
14214 14260 savepip = pip;
14215 14261 pip = mdi_get_next_client_path(pdip, pip);
14216 14262 if (addr == NULL) {
14217 14263 continue;
14218 14264 }
14219 14265
14220 14266 if ((cp = strchr(addr, ',')) == NULL) {
14221 14267 continue;
14222 14268 }
14223 14269
14224 14270 s = (uintptr_t)cp - (uintptr_t)addr;
14225 14271
14226 14272 if (strncmp(addr, name, s) != 0) {
14227 14273 continue;
14228 14274 }
14229 14275
14230 14276 (void) mptsas_offline_lun(pdip, NULL, savepip,
14231 14277 NDI_DEVI_REMOVE);
14232 14278 /*
14233 14279 * driver will not invoke mdi_pi_free, so path will not
14234 14280 * be freed forever, return DDI_FAILURE.
14235 14281 */
14236 14282 rval = DDI_FAILURE;
14237 14283 }
14238 14284 return (rval);
14239 14285 }
14240 14286
/*
 * Offline one LUN, given either a regular child devinfo node (rdip) or
 * an MPxIO pathinfo node (rpip) -- exactly one must be non-NULL.  The
 * devfs name cache for the node is cleaned first so stale entries do
 * not linger after the offline.
 */
static int
mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
    mdi_pathinfo_t *rpip, uint_t flags)
{
	int		rval = DDI_FAILURE;
	char		*devname;
	dev_info_t	*cdip, *parent;

	if (rpip != NULL) {
		/* MPxIO path: the client node lives under scsi_vhci. */
		parent = scsi_vhci_dip;
		cdip = mdi_pi_get_client(rpip);
	} else if (rdip != NULL) {
		parent = pdip;
		cdip = rdip;
	} else {
		return (DDI_FAILURE);
	}

	/*
	 * Make sure node is attached otherwise
	 * it won't have related cache nodes to
	 * clean up. i_ddi_devi_attached is
	 * similiar to i_ddi_node_state(cdip) >=
	 * DS_ATTACHED.
	 */
	if (i_ddi_devi_attached(cdip)) {

		/* Get full devname */
		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
		(void) ddi_deviname(cdip, devname);
		/* Clean cache */
		(void) devfs_clean(parent, devname + 1,
		    DV_CLEAN_FORCE);
		kmem_free(devname, MAXNAMELEN + 1);
	}
	if (rpip != NULL) {
		/* An already-offline path counts as success. */
		if (MDI_PI_IS_OFFLINE(rpip)) {
			rval = DDI_SUCCESS;
		} else {
			rval = mdi_pi_offline(rpip, 0);
		}
	} else {
		rval = ndi_devi_offline(cdip, flags);
	}

	return (rval);
}
14288 14334
14289 14335 static dev_info_t *
14290 14336 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
14291 14337 {
14292 14338 dev_info_t *child = NULL;
14293 14339 char *smp_wwn = NULL;
14294 14340
14295 14341 child = ddi_get_child(parent);
14296 14342 while (child) {
14297 14343 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
14298 14344 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
14299 14345 != DDI_SUCCESS) {
14300 14346 child = ddi_get_next_sibling(child);
14301 14347 continue;
14302 14348 }
14303 14349
14304 14350 if (strcmp(smp_wwn, str_wwn) == 0) {
14305 14351 ddi_prop_free(smp_wwn);
14306 14352 break;
14307 14353 }
14308 14354 child = ddi_get_next_sibling(child);
14309 14355 ddi_prop_free(smp_wwn);
14310 14356 }
14311 14357 return (child);
14312 14358 }
14313 14359
14314 14360 static int
14315 14361 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
14316 14362 {
14317 14363 int rval = DDI_FAILURE;
14318 14364 char *devname;
14319 14365 char wwn_str[MPTSAS_WWN_STRLEN];
14320 14366 dev_info_t *cdip;
14321 14367
14322 14368 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
14323 14369
14324 14370 cdip = mptsas_find_smp_child(pdip, wwn_str);
14325 14371
14326 14372 if (cdip == NULL)
14327 14373 return (DDI_SUCCESS);
14328 14374
14329 14375 /*
14330 14376 * Make sure node is attached otherwise
14331 14377 * it won't have related cache nodes to
14332 14378 * clean up. i_ddi_devi_attached is
14333 14379 * similiar to i_ddi_node_state(cdip) >=
14334 14380 * DS_ATTACHED.
14335 14381 */
14336 14382 if (i_ddi_devi_attached(cdip)) {
14337 14383
14338 14384 /* Get full devname */
14339 14385 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14340 14386 (void) ddi_deviname(cdip, devname);
14341 14387 /* Clean cache */
14342 14388 (void) devfs_clean(pdip, devname + 1,
14343 14389 DV_CLEAN_FORCE);
14344 14390 kmem_free(devname, MAXNAMELEN + 1);
14345 14391 }
14346 14392
14347 14393 rval = ndi_devi_offline(cdip, flags);
14348 14394
14349 14395 return (rval);
14350 14396 }
14351 14397
14352 14398 static dev_info_t *
14353 14399 mptsas_find_child(dev_info_t *pdip, char *name)
14354 14400 {
14355 14401 dev_info_t *child = NULL;
14356 14402 char *rname = NULL;
14357 14403 int rval = DDI_FAILURE;
14358 14404
14359 14405 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14360 14406
14361 14407 child = ddi_get_child(pdip);
14362 14408 while (child) {
14363 14409 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
14364 14410 if (rval != DDI_SUCCESS) {
14365 14411 child = ddi_get_next_sibling(child);
14366 14412 bzero(rname, SCSI_MAXNAMELEN);
14367 14413 continue;
14368 14414 }
14369 14415
14370 14416 if (strcmp(rname, name) == 0) {
14371 14417 break;
14372 14418 }
14373 14419 child = ddi_get_next_sibling(child);
14374 14420 bzero(rname, SCSI_MAXNAMELEN);
14375 14421 }
14376 14422
14377 14423 kmem_free(rname, SCSI_MAXNAMELEN);
14378 14424
14379 14425 return (child);
14380 14426 }
14381 14427
14382 14428
14383 14429 static dev_info_t *
14384 14430 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
14385 14431 {
14386 14432 dev_info_t *child = NULL;
14387 14433 char *name = NULL;
14388 14434 char *addr = NULL;
14389 14435
14390 14436 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14391 14437 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14392 14438 (void) sprintf(name, "%016"PRIx64, sasaddr);
14393 14439 (void) sprintf(addr, "w%s,%x", name, lun);
14394 14440 child = mptsas_find_child(pdip, addr);
14395 14441 kmem_free(name, SCSI_MAXNAMELEN);
14396 14442 kmem_free(addr, SCSI_MAXNAMELEN);
14397 14443 return (child);
14398 14444 }
14399 14445
14400 14446 static dev_info_t *
14401 14447 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
14402 14448 {
14403 14449 dev_info_t *child;
14404 14450 char *addr;
14405 14451
14406 14452 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14407 14453 (void) sprintf(addr, "p%x,0", phy);
14408 14454 child = mptsas_find_child(pdip, addr);
14409 14455 kmem_free(addr, SCSI_MAXNAMELEN);
14410 14456 return (child);
14411 14457 }
14412 14458
14413 14459 static mdi_pathinfo_t *
14414 14460 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
14415 14461 {
14416 14462 mdi_pathinfo_t *path;
14417 14463 char *addr = NULL;
14418 14464
14419 14465 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14420 14466 (void) sprintf(addr, "p%x,0", phy);
14421 14467 path = mdi_pi_find(pdip, NULL, addr);
14422 14468 kmem_free(addr, SCSI_MAXNAMELEN);
14423 14469 return (path);
14424 14470 }
14425 14471
14426 14472 static mdi_pathinfo_t *
14427 14473 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
14428 14474 {
14429 14475 mdi_pathinfo_t *path;
14430 14476 char *name = NULL;
14431 14477 char *addr = NULL;
14432 14478
14433 14479 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14434 14480 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14435 14481 (void) sprintf(name, "%016"PRIx64, sasaddr);
14436 14482 (void) sprintf(addr, "w%s,%x", name, lun);
14437 14483 path = mdi_pi_find(parent, NULL, addr);
14438 14484 kmem_free(name, SCSI_MAXNAMELEN);
14439 14485 kmem_free(addr, SCSI_MAXNAMELEN);
14440 14486
14441 14487 return (path);
14442 14488 }
14443 14489
/*
 * Create the child node for one LUN of target 'ptgt' under 'pdip'.
 *
 * CD/DVD (DTYPE_RODIRECT), optical (DTYPE_OPTICAL) and enclosure
 * (DTYPE_ESI) devices are never enumerated under scsi_vhci, so the
 * page-0x83 inquiry is skipped for them.  For everything else the
 * page-0x83 inquiry is retried for up to mptsas_inq83_retry_timeout
 * iterations (one-second delay between tries) because some devices
 * return corrupt page-83 data on the first query; the data is then
 * encoded into a devid, from which a GUID is extracted.  A GUID longer
 * than MPTSAS_MAX_GUID_LEN is discarded (MPXIO disabled for the drive).
 *
 * With a valid GUID and m_mpxio_enable set, a virtual (multipath) LUN
 * is attempted first; on failure (or without a GUID) a physical child
 * node is created instead.
 *
 * Returns DDI_SUCCESS and sets '*lun_dip' on success, DDI_FAILURE
 * otherwise.
 */
14444 14490 static int
14445 14491 mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
14446 14492     dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
14447 14493 {
14448 14494 	int i = 0;
14449 14495 	uchar_t *inq83 = NULL;
14450 14496 	int inq83_len1 = 0xFF;
14451 14497 	int inq83_len = 0;
14452 14498 	int rval = DDI_FAILURE;
14453 14499 	ddi_devid_t devid;
14454 14500 	char *guid = NULL;
14455 14501 	int target = ptgt->m_devhdl;
14456 14502 	mdi_pathinfo_t *pip = NULL;
14457 14503 	mptsas_t *mpt = DIP2MPT(pdip);
14458 14504 
14459 14505 	/*
14460 14506 	 * For DVD/CD ROM and tape devices and optical
14461 14507 	 * devices, we won't try to enumerate them under
14462 14508 	 * scsi_vhci, so no need to try page83
14463 14509 	 */
14464 14510 	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
14465 14511 	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
14466 14512 	    sd_inq->inq_dtype == DTYPE_ESI))
14467 14513 		goto create_lun;
14468 14514 
14469 14515 	/*
14470 14516 	 * The LCA returns good SCSI status, but corrupt page 83 data the first
14471 14517 	 * time it is queried. The solution is to keep trying to request page83
14472 14518 	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
14473 14519 	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
14474 14520 	 * give up to get VPD page at this stage and fail the enumeration.
14475 14521 	 */
14476 14522 
14477 14523 	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);
14478 14524 
14479 14525 	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
14480 14526 		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
14481 14527 		    inq83_len1, &inq83_len, 1);
14482 14528 		if (rval != 0) {
14483 14529 			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
14484 14530 			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * When the tunable is set, a failed page-83 request
			 * still falls through to physical binding instead of
			 * failing the whole enumeration.
			 */
14485 14531 			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
14486 14532 				goto create_lun;
14487 14533 			goto out;
14488 14534 		}
14489 14535 		/*
14490 14536 		 * create DEVID from inquiry data
14491 14537 		 */
14492 14538 		if ((rval = ddi_devid_scsi_encode(
14493 14539 		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
14494 14540 		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
14495 14541 		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
14496 14542 			/*
14497 14543 			 * extract GUID from DEVID
14498 14544 			 */
14499 14545 			guid = ddi_devid_to_guid(devid);
14500 14546 
14501 14547 			/*
14502 14548 			 * Do not enable MPXIO if the strlen(guid) is greater
14503 14549 			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
14504 14550 			 * handled by framework later.
14505 14551 			 */
14506 14552 			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
14507 14553 				ddi_devid_free_guid(guid);
14508 14554 				guid = NULL;
14509 14555 				if (mpt->m_mpxio_enable == TRUE) {
14510 14556 					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
14511 14557 					    "lun:%x doesn't have a valid GUID, "
14512 14558 					    "multipathing for this drive is "
14513 14559 					    "not enabled", target, lun);
14514 14560 				}
14515 14561 			}
14516 14562 
14517 14563 			/*
14518 14564 			 * devid no longer needed
14519 14565 			 */
14520 14566 			ddi_devid_free(devid);
14521 14567 			break;
14522 14568 		} else if (rval == DDI_NOT_WELL_FORMED) {
14523 14569 			/*
14524 14570 			 * return value of ddi_devid_scsi_encode equal to
14525 14571 			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
14526 14572 			 * to retry inquiry page 0x83 and get GUID.
14527 14573 			 */
14528 14574 			NDBG20(("Not well formed devid, retry..."));
14529 14575 			delay(1 * drv_usectohz(1000000));
14530 14576 			continue;
14531 14577 		} else {
14532 14578 			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
14533 14579 			    "path target:%x, lun:%x", target, lun);
14534 14580 			rval = DDI_FAILURE;
14535 14581 			goto create_lun;
14536 14582 		}
14537 14583 	}
14538 14584 
	/* Loop exhausted every retry without a well-formed devid. */
14539 14585 	if (i == mptsas_inq83_retry_timeout) {
14540 14586 		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
14541 14587 		    "for path target:%x, lun:%x", target, lun);
14542 14588 	}
14543 14589 
14544 14590 	rval = DDI_FAILURE;
14545 14591 
14546 14592 create_lun:
	/* Prefer the multipath (virtual) node when a GUID is available. */
14547 14593 	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
14548 14594 		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
14549 14595 		    ptgt, lun);
14550 14596 	}
14551 14597 	if (rval != DDI_SUCCESS) {
14552 14598 		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
14553 14599 		    ptgt, lun);
14554 14600 
14555 14601 	}
14556 14602 out:
14557 14603 	if (guid != NULL) {
14558 14604 		/*
14559 14605 		 * guid no longer needed
14560 14606 		 */
14561 14607 		ddi_devid_free_guid(guid);
14562 14608 	}
14563 14609 	if (inq83 != NULL)
14564 14610 		kmem_free(inq83, inq83_len1);
14565 14611 	return (rval);
14566 14612 }
14567 14613
/*
 * Enumerate a LUN as an mdi pathinfo (scsi_vhci client) node under the
 * iport 'pdip'.
 *
 * If a pathinfo for this target/lun already exists: when its
 * client-guid property matches 'guid' the existing path is simply
 * brought back online; when the GUID differs (e.g. another volume was
 * mapped to the same LUN) the stale path is offlined and freed before a
 * new one is allocated.  A newly allocated pathinfo gets the standard
 * property set (client-guid, lun/lun64, compatible, target-port or
 * sata-phy, attached-port, obp-path for disks, pm-capable for
 * SATA/ATAPI, phy-num) and is then onlined.
 *
 * NOTE(review): this webrev replaces the mptsas_set_led_status() calls
 * with setting ptgt->m_led_status = 0 followed by
 * mptsas_flush_led_status() — the '-'/'+' hunks below are the change
 * under review (OS-1997, expose LED controls to libtopo).
 *
 * Returns DDI_SUCCESS and sets '*pip'/'*lun_dip' on success, otherwise
 * DDI_FAILURE with both output pointers NULLed.
 */
14568 14614 static int
14569 14615 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
14570 14616     dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
14571 14617 {
14572 14618 	int target;
14573 14619 	char *nodename = NULL;
14574 14620 	char **compatible = NULL;
14575 14621 	int ncompatible = 0;
14576 14622 	int mdi_rtn = MDI_FAILURE;
14577 14623 	int rval = DDI_FAILURE;
14578 14624 	char *old_guid = NULL;
14579 14625 	mptsas_t *mpt = DIP2MPT(pdip);
14580 14626 	char *lun_addr = NULL;
14581 14627 	char *wwn_str = NULL;
14582 14628 	char *attached_wwn_str = NULL;
14583 14629 	char *component = NULL;
14584 14630 	uint8_t phy = 0xFF;
14585 14631 	uint64_t sas_wwn;
14586 14632 	int64_t lun64 = 0;
14587 14633 	uint32_t devinfo;
14588 14634 	uint16_t dev_hdl;
14589 14635 	uint16_t pdev_hdl;
14590 14636 	uint64_t dev_sas_wwn;
14591 14637 	uint64_t pdev_sas_wwn;
14592 14638 	uint32_t pdev_info;
14593 14639 	uint8_t physport;
14594 14640 	uint8_t phy_id;
14595 14641 	uint32_t page_address;
14596 14642 	uint16_t bay_num, enclosure;
14597 14643 	char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14598 14644 	uint32_t dev_info;
14599 14645 
	/* Snapshot target identity under m_mutex. */
14600 14646 	mutex_enter(&mpt->m_mutex);
14601 14647 	target = ptgt->m_devhdl;
14602 14648 	sas_wwn = ptgt->m_sas_wwn;
14603 14649 	devinfo = ptgt->m_deviceinfo;
14604 14650 	phy = ptgt->m_phynum;
14605 14651 	mutex_exit(&mpt->m_mutex);
14606 14652 
	/* WWN-addressed targets use "w...", phy-addressed use "p...". */
14607 14653 	if (sas_wwn) {
14608 14654 		*pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
14609 14655 	} else {
14610 14656 		*pip = mptsas_find_path_phy(pdip, phy);
14611 14657 	}
14612 14658 
	/*
	 * Existing pathinfo: decide between reonline (same GUID) and
	 * teardown/recreate (GUID changed).
	 */
14613 14659 	if (*pip != NULL) {
14614 14660 		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14615 14661 		ASSERT(*lun_dip != NULL);
14616 14662 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
14617 14663 		    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
14618 14664 		    MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
↓ open down ↓ |
2440 lines elided |
↑ open up ↑ |
14619 14665 			if (strncmp(guid, old_guid, strlen(guid)) == 0) {
14620 14666 				/*
14621 14667 				 * Same path back online again.
14622 14668 				 */
14623 14669 				(void) ddi_prop_free(old_guid);
14624 14670 				if ((!MDI_PI_IS_ONLINE(*pip)) &&
14625 14671 				    (!MDI_PI_IS_STANDBY(*pip)) &&
14626 14672 				    (ptgt->m_tgt_unconfigured == 0)) {
14627 14673 					rval = mdi_pi_online(*pip, 0);
14628 14674 					mutex_enter(&mpt->m_mutex);
14629 -					(void) mptsas_set_led_status(mpt, ptgt,
14630 -					    0);
14675 +					ptgt->m_led_status = 0;
14676 +					(void) mptsas_flush_led_status(mpt,
14677 +					    ptgt);
14631 14678 					mutex_exit(&mpt->m_mutex);
14632 14679 				} else {
14633 14680 					rval = DDI_SUCCESS;
14634 14681 				}
14635 14682 				if (rval != DDI_SUCCESS) {
14636 14683 					mptsas_log(mpt, CE_WARN, "path:target: "
14637 14684 					    "%x, lun:%x online failed!", target,
14638 14685 					    lun);
14639 14686 					*pip = NULL;
14640 14687 					*lun_dip = NULL;
14641 14688 				}
14642 14689 				return (rval);
14643 14690 			} else {
14644 14691 				/*
14645 14692 				 * The GUID of the LUN has changed which maybe
14646 14693 				 * because customer mapped another volume to the
14647 14694 				 * same LUN.
14648 14695 				 */
14649 14696 				mptsas_log(mpt, CE_WARN, "The GUID of the "
14650 14697 				    "target:%x, lun:%x was changed, maybe "
14651 14698 				    "because someone mapped another volume "
14652 14699 				    "to the same LUN", target, lun);
14653 14700 				(void) ddi_prop_free(old_guid);
14654 14701 				if (!MDI_PI_IS_OFFLINE(*pip)) {
14655 14702 					rval = mdi_pi_offline(*pip, 0);
14656 14703 					if (rval != MDI_SUCCESS) {
14657 14704 						mptsas_log(mpt, CE_WARN, "path:"
14658 14705 						    "target:%x, lun:%x offline "
14659 14706 						    "failed!", target, lun);
14660 14707 						*pip = NULL;
14661 14708 						*lun_dip = NULL;
14662 14709 						return (DDI_FAILURE);
14663 14710 					}
14664 14711 				}
14665 14712 				if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
14666 14713 					mptsas_log(mpt, CE_WARN, "path:target:"
14667 14714 					    "%x, lun:%x free failed!", target,
14668 14715 					    lun);
14669 14716 					*pip = NULL;
14670 14717 					*lun_dip = NULL;
14671 14718 					return (DDI_FAILURE);
14672 14719 				}
14673 14720 			}
14674 14721 		} else {
14675 14722 			mptsas_log(mpt, CE_WARN, "Can't get client-guid "
14676 14723 			    "property for path:target:%x, lun:%x", target, lun);
14677 14724 			*pip = NULL;
14678 14725 			*lun_dip = NULL;
14679 14726 			return (DDI_FAILURE);
14680 14727 		}
14681 14728 	}
14682 14729 	scsi_hba_nodename_compatible_get(inq, NULL,
14683 14730 	    inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
14684 14731 
14685 14732 	/*
14686 14733 	 * if nodename can't be determined then print a message and skip it
14687 14734 	 */
14688 14735 	if (nodename == NULL) {
14689 14736 		mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
14690 14737 		    "driver for target%d lun %d dtype:0x%02x", target, lun,
14691 14738 		    inq->inq_dtype);
14692 14739 		return (DDI_FAILURE);
14693 14740 	}
14694 14741 
14695 14742 	wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14696 14743 	/* The property is needed by MPAPI */
14697 14744 	(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14698 14745 
14699 14746 	lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14700 14747 	if (guid) {
14701 14748 		(void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
14702 14749 		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14703 14750 	} else {
14704 14751 		(void) sprintf(lun_addr, "p%x,%x", phy, lun);
14705 14752 		(void) sprintf(wwn_str, "p%x", phy);
14706 14753 	}
14707 14754 
14708 14755 	mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
14709 14756 	    guid, lun_addr, compatible, ncompatible,
14710 14757 	    0, pip);
	/*
	 * Pathinfo allocated: attach the standard properties.  Any
	 * failure forces mdi_rtn = MDI_FAILURE and jumps to
	 * virt_create_done, which frees the pathinfo.
	 */
14711 14758 	if (mdi_rtn == MDI_SUCCESS) {
14712 14759 
14713 14760 		if (mdi_prop_update_string(*pip, MDI_GUID,
14714 14761 		    guid) != DDI_SUCCESS) {
14715 14762 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14716 14763 			    "create prop for target %d lun %d (MDI_GUID)",
14717 14764 			    target, lun);
14718 14765 			mdi_rtn = MDI_FAILURE;
14719 14766 			goto virt_create_done;
14720 14767 		}
14721 14768 
14722 14769 		if (mdi_prop_update_int(*pip, LUN_PROP,
14723 14770 		    lun) != DDI_SUCCESS) {
14724 14771 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14725 14772 			    "create prop for target %d lun %d (LUN_PROP)",
14726 14773 			    target, lun);
14727 14774 			mdi_rtn = MDI_FAILURE;
14728 14775 			goto virt_create_done;
14729 14776 		}
14730 14777 		lun64 = (int64_t)lun;
14731 14778 		if (mdi_prop_update_int64(*pip, LUN64_PROP,
14732 14779 		    lun64) != DDI_SUCCESS) {
14733 14780 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14734 14781 			    "create prop for target %d (LUN64_PROP)",
14735 14782 			    target);
14736 14783 			mdi_rtn = MDI_FAILURE;
14737 14784 			goto virt_create_done;
14738 14785 		}
14739 14786 		if (mdi_prop_update_string_array(*pip, "compatible",
14740 14787 		    compatible, ncompatible) !=
14741 14788 		    DDI_PROP_SUCCESS) {
14742 14789 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14743 14790 			    "create prop for target %d lun %d (COMPATIBLE)",
14744 14791 			    target, lun);
14745 14792 			mdi_rtn = MDI_FAILURE;
14746 14793 			goto virt_create_done;
14747 14794 		}
14748 14795 		if (sas_wwn && (mdi_prop_update_string(*pip,
14749 14796 		    SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
14750 14797 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14751 14798 			    "create prop for target %d lun %d "
14752 14799 			    "(target-port)", target, lun);
14753 14800 			mdi_rtn = MDI_FAILURE;
14754 14801 			goto virt_create_done;
14755 14802 		} else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
14756 14803 		    "sata-phy", phy) != DDI_PROP_SUCCESS)) {
14757 14804 			/*
14758 14805 			 * Direct attached SATA device without DeviceName
14759 14806 			 */
14760 14807 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14761 14808 			    "create prop for SAS target %d lun %d "
14762 14809 			    "(sata-phy)", target, lun);
14763 14810 			mdi_rtn = MDI_FAILURE;
14764 14811 			goto virt_create_done;
14765 14812 		}
14766 14813 		mutex_enter(&mpt->m_mutex);
14767 14814 
		/* Fetch device page 0 for the target, then for its parent. */
14768 14815 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14769 14816 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14770 14817 		    (uint32_t)ptgt->m_devhdl;
14771 14818 		rval = mptsas_get_sas_device_page0(mpt, page_address,
14772 14819 		    &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
14773 14820 		    &phy_id, &pdev_hdl, &bay_num, &enclosure);
14774 14821 		if (rval != DDI_SUCCESS) {
14775 14822 			mutex_exit(&mpt->m_mutex);
14776 14823 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14777 14824 			    "parent device for handle %d", page_address);
14778 14825 			mdi_rtn = MDI_FAILURE;
14779 14826 			goto virt_create_done;
14780 14827 		}
14781 14828 
14782 14829 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14783 14830 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14784 14831 		rval = mptsas_get_sas_device_page0(mpt, page_address,
14785 14832 		    &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
14786 14833 		    &phy_id, &pdev_hdl, &bay_num, &enclosure);
14787 14834 		if (rval != DDI_SUCCESS) {
14788 14835 			mutex_exit(&mpt->m_mutex);
14789 14836 			mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14790 14837 			    "device info for handle %d", page_address);
14791 14838 			mdi_rtn = MDI_FAILURE;
14792 14839 			goto virt_create_done;
14793 14840 		}
14794 14841 
14795 14842 		mutex_exit(&mpt->m_mutex);
14796 14843 
14797 14844 		/*
14798 14845 		 * If this device direct attached to the controller
14799 14846 		 * set the attached-port to the base wwid
14800 14847 		 */
14801 14848 		if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14802 14849 		    != DEVINFO_DIRECT_ATTACHED) {
14803 14850 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14804 14851 			    pdev_sas_wwn);
14805 14852 		} else {
14806 14853 			/*
14807 14854 			 * Update the iport's attached-port to guid
14808 14855 			 */
14809 14856 			if (sas_wwn == 0) {
14810 14857 				(void) sprintf(wwn_str, "p%x", phy);
14811 14858 			} else {
14812 14859 				(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14813 14860 			}
14814 14861 			if (ddi_prop_update_string(DDI_DEV_T_NONE,
14815 14862 			    pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14816 14863 			    DDI_PROP_SUCCESS) {
14817 14864 				mptsas_log(mpt, CE_WARN,
14818 14865 				    "mptsas unable to create "
14819 14866 				    "property for iport target-port"
14820 14867 				    " %s (sas_wwn)",
14821 14868 				    wwn_str);
14822 14869 				mdi_rtn = MDI_FAILURE;
14823 14870 				goto virt_create_done;
14824 14871 			}
14825 14872 
14826 14873 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14827 14874 			    mpt->un.m_base_wwid);
14828 14875 		}
14829 14876 
14830 14877 		if (mdi_prop_update_string(*pip,
14831 14878 		    SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14832 14879 		    DDI_PROP_SUCCESS) {
14833 14880 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14834 14881 			    "property for iport attached-port %s (sas_wwn)",
14835 14882 			    attached_wwn_str);
14836 14883 			mdi_rtn = MDI_FAILURE;
14837 14884 			goto virt_create_done;
14838 14885 		}
14839 14886 
14840 14887 
		/* inq_dtype 0 is a direct-access (disk) device. */
14841 14888 		if (inq->inq_dtype == 0) {
14842 14889 			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14843 14890 			/*
14844 14891 			 * set obp path for pathinfo
14845 14892 			 */
14846 14893 			(void) snprintf(component, MAXPATHLEN,
14847 14894 			    "disk@%s", lun_addr);
14848 14895 
14849 14896 			if (mdi_pi_pathname_obp_set(*pip, component) !=
14850 14897 			    DDI_SUCCESS) {
14851 14898 				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14852 14899 				    "unable to set obp-path for object %s",
14853 14900 				    component);
14854 14901 				mdi_rtn = MDI_FAILURE;
14855 14902 				goto virt_create_done;
14856 14903 			}
14857 14904 		}
14858 14905 
14859 14906 		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14860 14907 		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14861 14908 		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14862 14909 			if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
14863 14910 			    "pm-capable", 1)) !=
14864 14911 			    DDI_PROP_SUCCESS) {
14865 14912 				mptsas_log(mpt, CE_WARN, "mptsas driver"
14866 14913 				    "failed to create pm-capable "
14867 14914 				    "property, target %d", target);
14868 14915 				mdi_rtn = MDI_FAILURE;
14869 14916 				goto virt_create_done;
14870 14917 			}
14871 14918 		}
14872 14919 		/*
14873 14920 		 * Create the phy-num property
14874 14921 		 */
14875 14922 		if (mdi_prop_update_int(*pip, "phy-num",
14876 14923 		    ptgt->m_phynum) != DDI_SUCCESS) {
↓ open down ↓ |
236 lines elided |
↑ open up ↑ |
14877 14924 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14878 14925 			    "create phy-num property for target %d lun %d",
14879 14926 			    target, lun);
14880 14927 			mdi_rtn = MDI_FAILURE;
14881 14928 			goto virt_create_done;
14882 14929 		}
14883 14930 		NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14884 14931 		mdi_rtn = mdi_pi_online(*pip, 0);
14885 14932 		if (mdi_rtn == MDI_SUCCESS) {
14886 14933 			mutex_enter(&mpt->m_mutex);
14887 -			if (mptsas_set_led_status(mpt, ptgt, 0) !=
14888 -			    DDI_SUCCESS) {
14934 +			ptgt->m_led_status = 0;
14935 +			if (mptsas_flush_led_status(mpt, ptgt) != DDI_SUCCESS) {
14889 14936 				NDBG14(("mptsas: clear LED for slot %x "
14890 14937 				    "failed", ptgt->m_slot_num));
14891 14938 			}
14892 14939 			mutex_exit(&mpt->m_mutex);
14893 14940 		}
14894 14941 		if (mdi_rtn == MDI_NOT_SUPPORTED) {
14895 14942 			mdi_rtn = MDI_FAILURE;
14896 14943 		}
14897 14944 virt_create_done:
		/* Failure cleanup: release the partially set up pathinfo. */
14898 14945 		if (*pip && mdi_rtn != MDI_SUCCESS) {
14899 14946 			(void) mdi_pi_free(*pip, 0);
14900 14947 			*pip = NULL;
14901 14948 			*lun_dip = NULL;
14902 14949 		}
14903 14950 	}
14904 14951 
14905 14952 	scsi_hba_nodename_compatible_free(nodename, compatible);
14906 14953 	if (lun_addr != NULL) {
14907 14954 		kmem_free(lun_addr, SCSI_MAXNAMELEN);
14908 14955 	}
14909 14956 	if (wwn_str != NULL) {
14910 14957 		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14911 14958 	}
14912 14959 	if (component != NULL) {
14913 14960 		kmem_free(component, MAXPATHLEN);
14914 14961 	}
14915 14962 
14916 14963 	return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14917 14964 }
14918 14965
14919 14966 static int
14920 14967 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
14921 14968 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
14922 14969 {
14923 14970 int target;
14924 14971 int rval;
14925 14972 int ndi_rtn = NDI_FAILURE;
14926 14973 uint64_t be_sas_wwn;
14927 14974 char *nodename = NULL;
14928 14975 char **compatible = NULL;
14929 14976 int ncompatible = 0;
14930 14977 int instance = 0;
14931 14978 mptsas_t *mpt = DIP2MPT(pdip);
14932 14979 char *wwn_str = NULL;
14933 14980 char *component = NULL;
14934 14981 char *attached_wwn_str = NULL;
14935 14982 uint8_t phy = 0xFF;
14936 14983 uint64_t sas_wwn;
14937 14984 uint32_t devinfo;
14938 14985 uint16_t dev_hdl;
14939 14986 uint16_t pdev_hdl;
14940 14987 uint64_t pdev_sas_wwn;
14941 14988 uint64_t dev_sas_wwn;
14942 14989 uint32_t pdev_info;
14943 14990 uint8_t physport;
14944 14991 uint8_t phy_id;
14945 14992 uint32_t page_address;
14946 14993 uint16_t bay_num, enclosure;
14947 14994 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14948 14995 uint32_t dev_info;
14949 14996 int64_t lun64 = 0;
14950 14997
14951 14998 mutex_enter(&mpt->m_mutex);
14952 14999 target = ptgt->m_devhdl;
14953 15000 sas_wwn = ptgt->m_sas_wwn;
14954 15001 devinfo = ptgt->m_deviceinfo;
14955 15002 phy = ptgt->m_phynum;
14956 15003 mutex_exit(&mpt->m_mutex);
14957 15004
14958 15005 /*
14959 15006 * generate compatible property with binding-set "mpt"
14960 15007 */
14961 15008 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
14962 15009 &nodename, &compatible, &ncompatible);
14963 15010
14964 15011 /*
14965 15012 * if nodename can't be determined then print a message and skip it
14966 15013 */
14967 15014 if (nodename == NULL) {
14968 15015 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
14969 15016 "for target %d lun %d", target, lun);
14970 15017 return (DDI_FAILURE);
14971 15018 }
14972 15019
14973 15020 ndi_rtn = ndi_devi_alloc(pdip, nodename,
14974 15021 DEVI_SID_NODEID, lun_dip);
14975 15022
14976 15023 /*
14977 15024 * if lun alloc success, set props
14978 15025 */
14979 15026 if (ndi_rtn == NDI_SUCCESS) {
14980 15027
14981 15028 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14982 15029 *lun_dip, LUN_PROP, lun) !=
14983 15030 DDI_PROP_SUCCESS) {
14984 15031 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14985 15032 "property for target %d lun %d (LUN_PROP)",
14986 15033 target, lun);
14987 15034 ndi_rtn = NDI_FAILURE;
14988 15035 goto phys_create_done;
14989 15036 }
14990 15037
14991 15038 lun64 = (int64_t)lun;
14992 15039 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
14993 15040 *lun_dip, LUN64_PROP, lun64) !=
14994 15041 DDI_PROP_SUCCESS) {
14995 15042 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14996 15043 "property for target %d lun64 %d (LUN64_PROP)",
14997 15044 target, lun);
14998 15045 ndi_rtn = NDI_FAILURE;
14999 15046 goto phys_create_done;
15000 15047 }
15001 15048 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
15002 15049 *lun_dip, "compatible", compatible, ncompatible)
15003 15050 != DDI_PROP_SUCCESS) {
15004 15051 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15005 15052 "property for target %d lun %d (COMPATIBLE)",
15006 15053 target, lun);
15007 15054 ndi_rtn = NDI_FAILURE;
15008 15055 goto phys_create_done;
15009 15056 }
15010 15057
15011 15058 /*
15012 15059 * We need the SAS WWN for non-multipath devices, so
15013 15060 * we'll use the same property as that multipathing
15014 15061 * devices need to present for MPAPI. If we don't have
15015 15062 * a WWN (e.g. parallel SCSI), don't create the prop.
15016 15063 */
15017 15064 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15018 15065 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15019 15066 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
15020 15067 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
15021 15068 != DDI_PROP_SUCCESS) {
15022 15069 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15023 15070 "create property for SAS target %d lun %d "
15024 15071 "(target-port)", target, lun);
15025 15072 ndi_rtn = NDI_FAILURE;
15026 15073 goto phys_create_done;
15027 15074 }
15028 15075
15029 15076 be_sas_wwn = BE_64(sas_wwn);
15030 15077 if (sas_wwn && ndi_prop_update_byte_array(
15031 15078 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
15032 15079 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
15033 15080 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15034 15081 "create property for SAS target %d lun %d "
15035 15082 "(port-wwn)", target, lun);
15036 15083 ndi_rtn = NDI_FAILURE;
15037 15084 goto phys_create_done;
15038 15085 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
15039 15086 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
15040 15087 DDI_PROP_SUCCESS)) {
15041 15088 /*
15042 15089 * Direct attached SATA device without DeviceName
15043 15090 */
15044 15091 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15045 15092 "create property for SAS target %d lun %d "
15046 15093 "(sata-phy)", target, lun);
15047 15094 ndi_rtn = NDI_FAILURE;
15048 15095 goto phys_create_done;
15049 15096 }
15050 15097
15051 15098 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15052 15099 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
15053 15100 mptsas_log(mpt, CE_WARN, "mptsas unable to"
15054 15101 "create property for SAS target %d lun %d"
15055 15102 " (SAS_PROP)", target, lun);
15056 15103 ndi_rtn = NDI_FAILURE;
15057 15104 goto phys_create_done;
15058 15105 }
15059 15106 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
15060 15107 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
15061 15108 mptsas_log(mpt, CE_WARN, "mptsas unable "
15062 15109 "to create guid property for target %d "
15063 15110 "lun %d", target, lun);
15064 15111 ndi_rtn = NDI_FAILURE;
15065 15112 goto phys_create_done;
15066 15113 }
15067 15114
15068 15115 /*
15069 15116 * The following code is to set properties for SM-HBA support,
15070 15117 * it doesn't apply to RAID volumes
15071 15118 */
15072 15119 if (ptgt->m_phymask == 0)
15073 15120 goto phys_raid_lun;
15074 15121
15075 15122 mutex_enter(&mpt->m_mutex);
15076 15123
15077 15124 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15078 15125 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15079 15126 (uint32_t)ptgt->m_devhdl;
15080 15127 rval = mptsas_get_sas_device_page0(mpt, page_address,
15081 15128 &dev_hdl, &dev_sas_wwn, &dev_info,
15082 15129 &physport, &phy_id, &pdev_hdl,
15083 15130 &bay_num, &enclosure);
15084 15131 if (rval != DDI_SUCCESS) {
15085 15132 mutex_exit(&mpt->m_mutex);
15086 15133 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15087 15134 "parent device for handle %d.", page_address);
15088 15135 ndi_rtn = NDI_FAILURE;
15089 15136 goto phys_create_done;
15090 15137 }
15091 15138
15092 15139 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15093 15140 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15094 15141 rval = mptsas_get_sas_device_page0(mpt, page_address,
15095 15142 &dev_hdl, &pdev_sas_wwn, &pdev_info,
15096 15143 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
15097 15144 if (rval != DDI_SUCCESS) {
15098 15145 mutex_exit(&mpt->m_mutex);
15099 15146 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15100 15147 "device for handle %d.", page_address);
15101 15148 ndi_rtn = NDI_FAILURE;
15102 15149 goto phys_create_done;
15103 15150 }
15104 15151
15105 15152 mutex_exit(&mpt->m_mutex);
15106 15153
15107 15154 /*
15108 15155 * If this device direct attached to the controller
15109 15156 * set the attached-port to the base wwid
15110 15157 */
15111 15158 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15112 15159 != DEVINFO_DIRECT_ATTACHED) {
15113 15160 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15114 15161 pdev_sas_wwn);
15115 15162 } else {
15116 15163 /*
15117 15164 * Update the iport's attached-port to guid
15118 15165 */
15119 15166 if (sas_wwn == 0) {
15120 15167 (void) sprintf(wwn_str, "p%x", phy);
15121 15168 } else {
15122 15169 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15123 15170 }
15124 15171 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15125 15172 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15126 15173 DDI_PROP_SUCCESS) {
15127 15174 mptsas_log(mpt, CE_WARN,
15128 15175 "mptsas unable to create "
15129 15176 "property for iport target-port"
15130 15177 " %s (sas_wwn)",
15131 15178 wwn_str);
15132 15179 ndi_rtn = NDI_FAILURE;
15133 15180 goto phys_create_done;
15134 15181 }
15135 15182
15136 15183 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15137 15184 mpt->un.m_base_wwid);
15138 15185 }
15139 15186
15140 15187 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15141 15188 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15142 15189 DDI_PROP_SUCCESS) {
15143 15190 mptsas_log(mpt, CE_WARN,
15144 15191 "mptsas unable to create "
15145 15192 "property for iport attached-port %s (sas_wwn)",
15146 15193 attached_wwn_str);
15147 15194 ndi_rtn = NDI_FAILURE;
15148 15195 goto phys_create_done;
15149 15196 }
15150 15197
15151 15198 if (IS_SATA_DEVICE(dev_info)) {
15152 15199 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15153 15200 *lun_dip, MPTSAS_VARIANT, "sata") !=
15154 15201 DDI_PROP_SUCCESS) {
15155 15202 mptsas_log(mpt, CE_WARN,
15156 15203 "mptsas unable to create "
15157 15204 "property for device variant ");
15158 15205 ndi_rtn = NDI_FAILURE;
15159 15206 goto phys_create_done;
15160 15207 }
15161 15208 }
15162 15209
15163 15210 if (IS_ATAPI_DEVICE(dev_info)) {
15164 15211 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15165 15212 *lun_dip, MPTSAS_VARIANT, "atapi") !=
15166 15213 DDI_PROP_SUCCESS) {
15167 15214 mptsas_log(mpt, CE_WARN,
15168 15215 "mptsas unable to create "
15169 15216 "property for device variant ");
15170 15217 ndi_rtn = NDI_FAILURE;
15171 15218 goto phys_create_done;
15172 15219 }
15173 15220 }
15174 15221
15175 15222 phys_raid_lun:
15176 15223 /*
15177 15224 * if this is a SAS controller, and the target is a SATA
15178 15225 * drive, set the 'pm-capable' property for sd and if on
15179 15226 * an OPL platform, also check if this is an ATAPI
15180 15227 * device.
15181 15228 */
15182 15229 instance = ddi_get_instance(mpt->m_dip);
15183 15230 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15184 15231 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15185 15232 NDBG2(("mptsas%d: creating pm-capable property, "
15186 15233 "target %d", instance, target));
15187 15234
15188 15235 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
15189 15236 *lun_dip, "pm-capable", 1)) !=
15190 15237 DDI_PROP_SUCCESS) {
15191 15238 mptsas_log(mpt, CE_WARN, "mptsas "
15192 15239 "failed to create pm-capable "
15193 15240 "property, target %d", target);
15194 15241 ndi_rtn = NDI_FAILURE;
15195 15242 goto phys_create_done;
15196 15243 }
15197 15244
15198 15245 }
15199 15246
15200 15247 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
15201 15248 /*
15202 15249 * add 'obp-path' properties for devinfo
15203 15250 */
15204 15251 bzero(wwn_str, sizeof (wwn_str));
15205 15252 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15206 15253 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15207 15254 if (guid) {
15208 15255 (void) snprintf(component, MAXPATHLEN,
15209 15256 "disk@w%s,%x", wwn_str, lun);
15210 15257 } else {
15211 15258 (void) snprintf(component, MAXPATHLEN,
15212 15259 "disk@p%x,%x", phy, lun);
15213 15260 }
15214 15261 if (ddi_pathname_obp_set(*lun_dip, component)
15215 15262 != DDI_SUCCESS) {
15216 15263 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15217 15264 "unable to set obp-path for SAS "
15218 15265 "object %s", component);
15219 15266 ndi_rtn = NDI_FAILURE;
15220 15267 goto phys_create_done;
15221 15268 }
15222 15269 }
15223 15270 /*
15224 15271 * Create the phy-num property for non-raid disk
15225 15272 */
15226 15273 if (ptgt->m_phymask != 0) {
15227 15274 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15228 15275 *lun_dip, "phy-num", ptgt->m_phynum) !=
15229 15276 DDI_PROP_SUCCESS) {
15230 15277 mptsas_log(mpt, CE_WARN, "mptsas driver "
15231 15278 "failed to create phy-num property for "
15232 15279 "target %d", target);
15233 15280 ndi_rtn = NDI_FAILURE;
15234 15281 goto phys_create_done;
15235 15282 }
15236 15283 }
15237 15284 phys_create_done:
15238 15285 /*
↓ open down ↓ |
340 lines elided |
↑ open up ↑ |
15239 15286 * If props were setup ok, online the lun
15240 15287 */
15241 15288 if (ndi_rtn == NDI_SUCCESS) {
15242 15289 /*
15243 15290 * Try to online the new node
15244 15291 */
15245 15292 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
15246 15293 }
15247 15294 if (ndi_rtn == NDI_SUCCESS) {
15248 15295 mutex_enter(&mpt->m_mutex);
15249 - if (mptsas_set_led_status(mpt, ptgt, 0) !=
15250 - DDI_SUCCESS) {
15296 + ptgt->m_led_status = 0;
15297 + if (mptsas_flush_led_status(mpt, ptgt) != DDI_SUCCESS) {
15251 15298 NDBG14(("mptsas: clear LED for tgt %x "
15252 15299 "failed", ptgt->m_slot_num));
15253 15300 }
15254 15301 mutex_exit(&mpt->m_mutex);
15255 15302 }
15256 15303
15257 15304 /*
15258 15305 * If success set rtn flag, else unwire alloc'd lun
15259 15306 */
15260 15307 if (ndi_rtn != NDI_SUCCESS) {
15261 15308 NDBG12(("mptsas driver unable to online "
15262 15309 "target %d lun %d", target, lun));
15263 15310 ndi_prop_remove_all(*lun_dip);
15264 15311 (void) ndi_devi_free(*lun_dip);
15265 15312 *lun_dip = NULL;
15266 15313 }
15267 15314 }
15268 15315
15269 15316 scsi_hba_nodename_compatible_free(nodename, compatible);
15270 15317
15271 15318 if (wwn_str != NULL) {
15272 15319 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15273 15320 }
15274 15321 if (component != NULL) {
15275 15322 kmem_free(component, MAXPATHLEN);
15276 15323 }
15277 15324
15278 15325
15279 15326 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15280 15327 }
15281 15328
15282 15329 static int
15283 15330 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
15284 15331 {
15285 15332 mptsas_t *mpt = DIP2MPT(pdip);
15286 15333 struct smp_device smp_sd;
15287 15334
15288 15335 /* XXX An HBA driver should not be allocating an smp_device. */
15289 15336 bzero(&smp_sd, sizeof (struct smp_device));
15290 15337 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
15291 15338 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
15292 15339
15293 15340 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
15294 15341 return (NDI_FAILURE);
15295 15342 return (NDI_SUCCESS);
15296 15343 }
15297 15344
15298 15345 static int
15299 15346 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
15300 15347 {
15301 15348 mptsas_t *mpt = DIP2MPT(pdip);
15302 15349 mptsas_smp_t *psmp = NULL;
15303 15350 int rval;
15304 15351 int phymask;
15305 15352
15306 15353 /*
15307 15354 * Get the physical port associated to the iport
15308 15355 * PHYMASK TODO
15309 15356 */
15310 15357 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
15311 15358 "phymask", 0);
15312 15359 /*
15313 15360 * Find the smp node in hash table with specified sas address and
15314 15361 * physical port
15315 15362 */
15316 15363 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
15317 15364 if (psmp == NULL) {
15318 15365 return (DDI_FAILURE);
15319 15366 }
15320 15367
15321 15368 rval = mptsas_online_smp(pdip, psmp, smp_dip);
15322 15369
15323 15370 return (rval);
15324 15371 }
15325 15372
/*
 * Allocate, decorate with properties, and online an "smp" child node for
 * the expander described by smp_node under the iport pdip.  On success
 * *smp_dip points at the onlined node and DDI_SUCCESS is returned; on any
 * failure the allocated node (if any) is torn down and DDI_FAILURE is
 * returned.
 */
static int
mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
    dev_info_t **smp_dip)
{
	char wwn_str[MPTSAS_WWN_STRLEN];
	char attached_wwn_str[MPTSAS_WWN_STRLEN];
	int ndi_rtn = NDI_FAILURE;
	int rval = 0;
	mptsas_smp_t dev_info;
	uint32_t page_address;
	mptsas_t *mpt = DIP2MPT(pdip);
	uint16_t dev_hdl;
	uint64_t sas_wwn;
	uint64_t smp_sas_wwn;
	uint8_t physport;
	uint8_t phy_id;
	uint16_t pdev_hdl;
	/*
	 * NOTE(review): numphys is a uint8_t but is assigned the result of
	 * ddi_prop_get_int() with a default of -1 below; an absent property
	 * therefore yields 255, which satisfies the "numphys > 0" early-out.
	 * Presumably intentional ("property already configured"), but worth
	 * confirming.
	 */
	uint8_t numphys = 0;
	uint16_t i = 0;
	char phymask[MPTSAS_MAX_PHYS];
	char *iport = NULL;
	mptsas_phymask_t phy_mask = 0;
	uint16_t attached_devhdl;
	uint16_t bay_num, enclosure;

	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);

	/*
	 * Probe smp device, prevent the node of removed device from being
	 * configured succesfully
	 */
	if (mptsas_probe_smp(pdip, smp_node->m_sasaddr) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Already configured?  Nothing to do. */
	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
		return (DDI_SUCCESS);
	}

	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {
		/*
		 * Set the flavor of the child to be SMP flavored
		 */
		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SMP_WWN, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp device %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_sasaddr);
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for iport target-port %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);

		/*
		 * Fetch expander page 0 to learn this expander's parent
		 * device handle.
		 */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &dev_info);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to get expander "
			    "parent device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Parent device page 0: gives us the attached-port WWN. */
		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_pdevhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* This expander's own device page 0. */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		/*
		 * attached-port: the parent's SAS WWN when the expander sits
		 * behind another device, or the HBA base WWID when it is
		 * direct attached to the controller.
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    sas_wwn);
		} else {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SMP %s (SMP_PROP) ",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/*
		 * check the smp to see whether it direct
		 * attached to the controller
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			goto smp_create_done;
		}
		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
		if (numphys > 0) {
			goto smp_create_done;
		}
		/*
		 * this iport is an old iport, we need to
		 * reconfig the props for it.
		 */
		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_VIRTUAL_PORT, 0) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			goto smp_create_done;
		}

		/*
		 * Match the iport's name (a hex phymask string) against the
		 * per-phy masks to recover this iport's phy_mask, then count
		 * the phys in it.
		 */
		mutex_enter(&mpt->m_mutex);
		numphys = 0;
		iport = ddi_get_name_addr(pdip);
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			bzero(phymask, sizeof (phymask));
			(void) sprintf(phymask,
			    "%x", mpt->m_phy_info[i].phy_mask);
			if (strcmp(phymask, iport) == 0) {
				phy_mask = mpt->m_phy_info[i].phy_mask;
				break;
			}
		}

		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			if ((phy_mask >> i) & 0x01) {
				numphys++;
			}
		}
		/*
		 * Update PHY info for smhba
		 */
		if (mptsas_smhba_phy_init(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas phy update "
			    "failed");
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		mptsas_smhba_set_phy_props(mpt, iport, pdip,
		    numphys, &attached_devhdl);

		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_NUM_PHYS, numphys) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_NUM_PHYS);
			mptsas_log(mpt, CE_WARN, "mptsas update "
			    "num phys props failed");
			goto smp_create_done;
		}
		/*
		 * Add parent's props for SMHBA support
		 */
		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    SCSI_ADDR_PROP_ATTACHED_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas update iport"
			    "attached-port failed");
			goto smp_create_done;
		}

smp_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas unable to online "
			    "SMP target %s", wwn_str));
			ndi_prop_remove_all(*smp_dip);
			(void) ndi_devi_free(*smp_dip);
		}
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
15578 15625
/*
 * smp transport routine: translate an SMP packet from the SMP framework
 * into an MPI2 SMP passthrough request and execute it synchronously via
 * mptsas_do_passthru().  Returns DDI_SUCCESS when both the IOC status
 * and the SAS status report success; otherwise sets smp_pkt_reason to an
 * errno value and returns DDI_FAILURE.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t wwn;
	Mpi2SmpPassthroughRequest_t req;
	Mpi2SmpPassthroughReply_t rep;
	uint32_t direction = 0;
	mptsas_t *mpt;
	int ret;
	uint64_t tmp64;

	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is a 16-bit field; reject oversize requests. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/* Lengths exclude the 4-byte CRC, hence the "- 4" adjustments. */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* Map the IOC status to an errno for the SMP framework. */
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
15663 15710
/*
 * Map a phy number (and iport phymask) to its target structure.  If the
 * target is not already in the hash table, fetch SAS device page 0 for
 * successive device handles until we get a match.  If that fails too,
 * return NULL.
 */
static mptsas_target_t *
mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, uint8_t phy)
{
	int i, j = 0;
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *ptgt = NULL;

	/*
	 * PHY named device must be direct attached and attaches to
	 * narrow port, if the iport is not parent of the device which
	 * we are looking for.
	 */
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		if ((1 << i) & phymask)
			j++;
	}

	/* More than one phy in the mask means a wide port: no match. */
	if (j > 1)
		return (NULL);

	/*
	 * Must be a narrow port and single device attached to the narrow port
	 * So the physical port num of device which is equal to the iport's
	 * port num is the device what we are looking for.
	 */

	if (mpt->m_phy_info[phy].phy_mask != phymask)
		return (NULL);

	mutex_enter(&mpt->m_mutex);

	/* First, scan the existing target hash for a phy-addressed match. */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/* WWN of zero marks a phy-addressed (directly named) target. */
		if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
			mutex_exit(&mpt->m_mutex);
			return (ptgt);
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* Already walked every firmware device handle: nothing more to find. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		ptgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember how far the handle walk has progressed. */
		mpt->m_dev_handle = cur_handle;

		if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (ptgt);
}
15744 15791
/*
 * The ptgt->m_sas_wwn contains the wwid for each disk.
 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
 * If we didn't get a match, we need to get sas page0 for each device
 * until we get a match.
 * If failed, return NULL
 */
static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *tmp_tgt = NULL;

	/* Fast path: target already present in the hash table. */
	mutex_enter(&mpt->m_mutex);
	tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
	    &mpt->m_active->m_tgttbl, wwid, phymask);
	if (tmp_tgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	if (phymask == 0) {
		/*
		 * It's IR volume
		 */
		rval = mptsas_get_raid_info(mpt);
		if (rval) {
			tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
			    &mpt->m_active->m_tgttbl, wwid, phymask);
		}
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	/* Already walked every firmware device handle: nothing more to find. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		tmp_tgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &tmp_tgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			tmp_tgt = NULL;
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember how far the handle walk has progressed. */
		mpt->m_dev_handle = cur_handle;
		if ((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) &&
		    (tmp_tgt->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (tmp_tgt);
}
15813 15860
/*
 * Look up the SMP (expander) node with the given SAS address and phymask.
 * Falls back to walking firmware expander page 0 by handle, caching each
 * discovered expander in the hash table, until a match is found or the
 * walk ends.  Returns NULL when no match exists.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_smp_t smp_node, *psmp = NULL;

	/* Fast path: expander already present in the hash table. */
	mutex_enter(&mpt->m_mutex);
	psmp = (struct mptsas_smp *)mptsas_hash_search(&mpt->m_active->m_smptbl,
	    wwid, phymask);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* Already walked every firmware expander handle: give up. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_smp_devhdl;
	for (; ; ) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			break;
		}
		/* Advance the persistent walk cursor and cache the node. */
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) &&
		    (psmp->m_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
15858 15905
15859 15906 /* helper functions using hash */
15860 15907
15861 15908 /*
15862 15909 * Can't have duplicate entries for same devhdl,
15863 15910 * if there are invalid entries, the devhdl should be set to 0xffff
15864 15911 */
15865 15912 static void *
15866 15913 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
15867 15914 {
15868 15915 mptsas_hash_data_t *data;
15869 15916
15870 15917 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
15871 15918 while (data != NULL) {
15872 15919 if (data->devhdl == devhdl) {
15873 15920 break;
15874 15921 }
15875 15922 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
15876 15923 }
15877 15924 return (data);
15878 15925 }
15879 15926
15880 15927 mptsas_target_t *
15881 15928 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
15882 15929 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum, mptsas_t *mpt)
15883 15930 {
15884 15931 mptsas_target_t *tmp_tgt = NULL;
15885 15932
15886 15933 tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
15887 15934 if (tmp_tgt != NULL) {
15888 15935 NDBG20(("Hash item already exist"));
15889 15936 tmp_tgt->m_deviceinfo = devinfo;
15890 15937 tmp_tgt->m_devhdl = devhdl;
15891 15938 return (tmp_tgt);
15892 15939 }
15893 15940 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
15894 15941 if (tmp_tgt == NULL) {
15895 15942 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
15896 15943 return (NULL);
15897 15944 }
15898 15945 tmp_tgt->m_devhdl = devhdl;
15899 15946 tmp_tgt->m_sas_wwn = wwid;
15900 15947 tmp_tgt->m_deviceinfo = devinfo;
15901 15948 tmp_tgt->m_phymask = phymask;
15902 15949 tmp_tgt->m_phynum = phynum;
15903 15950 /* Initialized the tgt structure */
15904 15951 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15905 15952 tmp_tgt->m_qfull_retry_interval =
15906 15953 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15907 15954 tmp_tgt->m_t_throttle = MAX_THROTTLE;
15908 15955 mutex_init(&tmp_tgt->m_tgt_intr_mutex, NULL, MUTEX_DRIVER,
15909 15956 DDI_INTR_PRI(mpt->m_intr_pri));
15910 15957
15911 15958 mptsas_hash_add(hashtab, tmp_tgt);
15912 15959
15913 15960 return (tmp_tgt);
15914 15961 }
15915 15962
15916 15963 static void
15917 15964 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15918 15965 mptsas_phymask_t phymask)
15919 15966 {
15920 15967 mptsas_target_t *tmp_tgt;
15921 15968 tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
15922 15969 if (tmp_tgt == NULL) {
15923 15970 cmn_err(CE_WARN, "Tgt not found, nothing to free");
15924 15971 } else {
15925 15972 mutex_destroy(&tmp_tgt->m_tgt_intr_mutex);
15926 15973 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
15927 15974 }
15928 15975 }
15929 15976
15930 15977 /*
15931 15978 * Return the entry in the hash table
15932 15979 */
15933 15980 static mptsas_smp_t *
15934 15981 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
15935 15982 {
15936 15983 uint64_t key1 = data->m_sasaddr;
15937 15984 mptsas_phymask_t key2 = data->m_phymask;
15938 15985 mptsas_smp_t *ret_data;
15939 15986
15940 15987 ret_data = mptsas_hash_search(hashtab, key1, key2);
15941 15988 if (ret_data != NULL) {
15942 15989 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15943 15990 return (ret_data);
15944 15991 }
15945 15992
15946 15993 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
15947 15994 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15948 15995 mptsas_hash_add(hashtab, ret_data);
15949 15996 return (ret_data);
15950 15997 }
15951 15998
15952 15999 static void
15953 16000 mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15954 16001 mptsas_phymask_t phymask)
15955 16002 {
15956 16003 mptsas_smp_t *tmp_smp;
15957 16004 tmp_smp = mptsas_hash_rem(hashtab, wwid, phymask);
15958 16005 if (tmp_smp == NULL) {
15959 16006 cmn_err(CE_WARN, "Smp element not found, nothing to free");
15960 16007 } else {
15961 16008 kmem_free(tmp_smp, sizeof (struct mptsas_smp));
15962 16009 }
15963 16010 }
15964 16011
15965 16012 /*
15966 16013 * Hash operation functions
15967 16014 * key1 is the sas_wwn, key2 is the phymask
15968 16015 */
15969 16016 static void
15970 16017 mptsas_hash_init(mptsas_hash_table_t *hashtab)
15971 16018 {
15972 16019 if (hashtab == NULL) {
15973 16020 return;
15974 16021 }
15975 16022 bzero(hashtab->head, sizeof (mptsas_hash_node_t) *
15976 16023 MPTSAS_HASH_ARRAY_SIZE);
15977 16024 hashtab->cur = NULL;
15978 16025 hashtab->line = 0;
15979 16026 }
15980 16027
15981 16028 static void
15982 16029 mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen)
15983 16030 {
15984 16031 uint16_t line = 0;
15985 16032 mptsas_hash_node_t *cur = NULL, *last = NULL;
15986 16033
15987 16034 if (hashtab == NULL) {
15988 16035 return;
15989 16036 }
15990 16037 for (line = 0; line < MPTSAS_HASH_ARRAY_SIZE; line++) {
15991 16038 cur = hashtab->head[line];
15992 16039 while (cur != NULL) {
15993 16040 last = cur;
15994 16041 cur = cur->next;
15995 16042 kmem_free(last->data, datalen);
15996 16043 kmem_free(last, sizeof (mptsas_hash_node_t));
15997 16044 }
15998 16045 }
15999 16046 }
16000 16047
16001 16048 /*
16002 16049 * You must guarantee the element doesn't exist in the hash table
16003 16050 * before you call mptsas_hash_add()
16004 16051 */
16005 16052 static void
16006 16053 mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data)
16007 16054 {
16008 16055 uint64_t key1 = ((mptsas_hash_data_t *)data)->key1;
16009 16056 mptsas_phymask_t key2 = ((mptsas_hash_data_t *)data)->key2;
16010 16057 mptsas_hash_node_t **head = NULL;
16011 16058 mptsas_hash_node_t *node = NULL;
16012 16059
16013 16060 if (hashtab == NULL) {
16014 16061 return;
16015 16062 }
16016 16063 ASSERT(mptsas_hash_search(hashtab, key1, key2) == NULL);
16017 16064 node = kmem_zalloc(sizeof (mptsas_hash_node_t), KM_NOSLEEP);
16018 16065 node->data = data;
16019 16066
16020 16067 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
16021 16068 if (*head == NULL) {
16022 16069 *head = node;
16023 16070 } else {
16024 16071 node->next = *head;
16025 16072 *head = node;
16026 16073 }
16027 16074 }
16028 16075
16029 16076 static void *
16030 16077 mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
16031 16078 mptsas_phymask_t key2)
16032 16079 {
16033 16080 mptsas_hash_node_t **head = NULL;
16034 16081 mptsas_hash_node_t *last = NULL, *cur = NULL;
16035 16082 mptsas_hash_data_t *data;
16036 16083 if (hashtab == NULL) {
16037 16084 return (NULL);
16038 16085 }
16039 16086 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
16040 16087 cur = *head;
16041 16088 while (cur != NULL) {
16042 16089 data = cur->data;
16043 16090 if ((data->key1 == key1) && (data->key2 == key2)) {
16044 16091 if (last == NULL) {
16045 16092 (*head) = cur->next;
16046 16093 } else {
16047 16094 last->next = cur->next;
16048 16095 }
16049 16096 kmem_free(cur, sizeof (mptsas_hash_node_t));
16050 16097 return (data);
16051 16098 } else {
16052 16099 last = cur;
16053 16100 cur = cur->next;
16054 16101 }
16055 16102 }
16056 16103 return (NULL);
16057 16104 }
16058 16105
16059 16106 static void *
16060 16107 mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
16061 16108 mptsas_phymask_t key2)
16062 16109 {
16063 16110 mptsas_hash_node_t *cur = NULL;
16064 16111 mptsas_hash_data_t *data;
16065 16112 if (hashtab == NULL) {
16066 16113 return (NULL);
16067 16114 }
16068 16115 cur = hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE];
16069 16116 while (cur != NULL) {
16070 16117 data = cur->data;
16071 16118 if ((data->key1 == key1) && (data->key2 == key2)) {
16072 16119 return (data);
16073 16120 } else {
16074 16121 cur = cur->next;
16075 16122 }
16076 16123 }
16077 16124 return (NULL);
16078 16125 }
16079 16126
/*
 * Stateful table iterator.  Pass MPTSAS_HASH_FIRST to (re)start from the
 * beginning, MPTSAS_HASH_NEXT to continue from the cursor stored in the
 * table itself (hashtab->cur / hashtab->line).  Returns each element's
 * payload in turn, then NULL at the end of the table.  Because the cursor
 * lives in the table, only one traversal per table may be active at once.
 */
static void *
mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos)
{
	mptsas_hash_node_t *this = NULL;

	if (hashtab == NULL) {
		return (NULL);
	}

	if (pos == MPTSAS_HASH_FIRST) {
		/* Restart: reset the cursor to the first bucket. */
		hashtab->line = 0;
		hashtab->cur = NULL;
		this = hashtab->head[0];
	} else {
		if (hashtab->cur == NULL) {
			return (NULL);
		} else {
			this = hashtab->cur->next;
		}
	}

	/* Skip forward over empty buckets to the next populated one. */
	while (this == NULL) {
		hashtab->line++;
		if (hashtab->line >= MPTSAS_HASH_ARRAY_SIZE) {
			/* the traverse reaches the end */
			hashtab->cur = NULL;
			return (NULL);
		} else {
			this = hashtab->head[hashtab->line];
		}
	}
	hashtab->cur = this;
	return (this->data);
}
16114 16161
16115 16162 /*
16116 16163 * Functions for SGPIO LED support
16117 16164 */
/*
 * Map a dev_t to its dev_info node and fetch that node's "phymask"
 * property into *phymask (0 if the property is absent).  Returns the
 * dip, or NULL if no devinfo node is bound to the dev_t.
 */
static dev_info_t *
mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
{
	dev_info_t *dip;
	int prop;
	dip = e_ddi_hold_devi_by_dev(dev, 0);
	if (dip == NULL)
		return (dip);
	prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "phymask", 0);
	*phymask = (mptsas_phymask_t)prop;
	/*
	 * NOTE(review): the hold taken by e_ddi_hold_devi_by_dev() is
	 * released here even though dip is returned to the caller.
	 * Confirm callers do not depend on the node staying held.
	 */
	ddi_release_devi(dip);
	return (dip);
}
16132 16179 static mptsas_target_t *
16133 16180 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16134 16181 {
16135 16182 uint8_t phynum;
16136 16183 uint64_t wwn;
16137 16184 int lun;
16138 16185 mptsas_target_t *ptgt = NULL;
16139 16186
16140 16187 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
↓ open down ↓ |
880 lines elided |
↑ open up ↑ |
16141 16188 return (NULL);
16142 16189 }
16143 16190 if (addr[0] == 'w') {
16144 16191 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16145 16192 } else {
16146 16193 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16147 16194 }
16148 16195 return (ptgt);
16149 16196 }
16150 16197
16151 -#ifdef MPTSAS_GET_LED
16152 16198 static int
16153 -mptsas_get_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
16154 - uint32_t *slotstatus)
16155 -{
16156 - return (mptsas_send_sep(mpt, ptgt, slotstatus,
16157 - MPI2_SEP_REQ_ACTION_READ_STATUS));
16158 -}
16159 -#endif
16160 -static int
16161 -mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt, uint32_t slotstatus)
16199 +mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt)
16162 16200 {
16201 + uint32_t slotstatus = 0;
16202 +
16203 + /* Build an MPI2 Slot Status based on our view of the world */
16204 + if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16205 + slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16206 + if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16207 + slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16208 + if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16209 + slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
16210 +
16211 + /* Write it to the controller */
16163 16212 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16164 16213 slotstatus, ptgt->m_slot_num));
16165 16214 return (mptsas_send_sep(mpt, ptgt, &slotstatus,
16166 16215 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16167 16216 }
16217 +
/*
 * Send a SCSI Enclosure Processor (SEP) request for the given target,
 * using enclosure/slot addressing.
 *
 * 'act' is an MPI2_SEP_REQ_ACTION_* code.  For WRITE_STATUS, *status
 * supplies the slot status word to write; for other actions the slot
 * status returned by the IOC is stored back into *status.  All request
 * fields are converted to little-endian (LE_16/LE_32) per the MPI2 wire
 * format, and the reply's IOCStatus is converted back before checking.
 *
 * Caller must hold mpt->m_mutex.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
    uint32_t *status, uint8_t act)
{
	Mpi2SepRequest_t req;
	Mpi2SepReply_t rep;
	int ret;

	ASSERT(mutex_owned(&mpt->m_mutex));

	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));

	/* Do nothing for RAID volumes */
	if (ptgt->m_phymask == 0) {
		NDBG14(("mptsas_send_sep: Skip RAID volumes"));
		return (DDI_FAILURE);
	}

	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	req.Action = act;
	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	req.EnclosureHandle = LE_16(ptgt->m_enclosure);
	req.Slot = LE_16(ptgt->m_slot_num);
	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		req.SlotStatus = LE_32(*status);
	}
	/* Issue the request through the internal passthrough path. */
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
	if (ret != 0) {
		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
		    "Processor Request message error %d", ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the ioc status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		if ((LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) ==
		    MPI2_IOCSTATUS_INVALID_FIELD) {
			mptsas_log(mpt, CE_NOTE, "send sep act %x: Not "
			    "supported action, loginfo %x", act,
			    LE_32(rep.IOCLogInfo));
			return (DDI_FAILURE);
		}
		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
		    "status:%x", act, LE_16(rep.IOCStatus));
		return (DDI_FAILURE);
	}
	/* Non-write actions report slot status back to the caller. */
	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		*status = LE_32(rep.SlotStatus);
	}

	return (DDI_SUCCESS);
}
16223 16273
16224 16274 int
16225 16275 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
16226 16276 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
16227 16277 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
16228 16278 {
16229 16279 ddi_dma_cookie_t new_cookie;
16230 16280 size_t alloc_len;
16231 16281 uint_t ncookie;
16232 16282
16233 16283 if (cookiep == NULL)
16234 16284 cookiep = &new_cookie;
16235 16285
16236 16286 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
16237 16287 NULL, dma_hdp) != DDI_SUCCESS) {
16238 16288 dma_hdp = NULL;
16239 16289 return (FALSE);
16240 16290 }
16241 16291
16242 16292 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
16243 16293 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
16244 16294 acc_hdp) != DDI_SUCCESS) {
16245 16295 ddi_dma_free_handle(dma_hdp);
16246 16296 dma_hdp = NULL;
16247 16297 return (FALSE);
16248 16298 }
16249 16299
16250 16300 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
16251 16301 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
16252 16302 cookiep, &ncookie) != DDI_DMA_MAPPED) {
16253 16303 (void) ddi_dma_mem_free(acc_hdp);
16254 16304 ddi_dma_free_handle(dma_hdp);
16255 16305 dma_hdp = NULL;
16256 16306 return (FALSE);
16257 16307 }
16258 16308
16259 16309 return (TRUE);
16260 16310 }
16261 16311
16262 16312 void
16263 16313 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
16264 16314 {
16265 16315 if (*dma_hdp == NULL)
16266 16316 return;
16267 16317
16268 16318 (void) ddi_dma_unbind_handle(*dma_hdp);
16269 16319 (void) ddi_dma_mem_free(acc_hdp);
16270 16320 ddi_dma_free_handle(dma_hdp);
16271 16321 dma_hdp = NULL;
16272 16322 }
16273 16323
/*
 * Count outstanding (in-flight) commands across all slot free-queue
 * pairs.  For each pair the outstanding count is the initial slot count
 * minus the slots currently on the alloc and release queues.  Both queue
 * mutexes are held (allocq first, then releq — this order must be
 * preserved) so each pair's snapshot is internally consistent.
 *
 * Panics if the computed count exceeds m_max_requests - 2, which would
 * indicate free-queue accounting corruption.
 */
static int
mptsas_outstanding_cmds_n(mptsas_t *mpt)
{
	int n = 0, i;
	for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) {
		mutex_enter(&mpt->m_slot_freeq_pairp[i].
		    m_slot_allocq.s.m_fq_mutex);
		mutex_enter(&mpt->m_slot_freeq_pairp[i].
		    m_slot_releq.s.m_fq_mutex);
		n += (mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n_init -
		    mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n -
		    mpt->m_slot_freeq_pairp[i].m_slot_releq.s.m_fq_n);
		mutex_exit(&mpt->m_slot_freeq_pairp[i].
		    m_slot_releq.s.m_fq_mutex);
		mutex_exit(&mpt->m_slot_freeq_pairp[i].
		    m_slot_allocq.s.m_fq_mutex);
	}
	if (mpt->m_max_requests - 2 < n)
		panic("mptsas: free slot allocq and releq crazy");
	return (n);
}
↓ open down ↓ |
117 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX