Print this page
5297 mptsas refhash replacement on reset can cause hang
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
+++ new/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25 25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 27 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
28 28 */
29 29
30 30 /*
31 31 * Copyright (c) 2000 to 2010, LSI Corporation.
32 32 * All rights reserved.
33 33 *
34 34 * Redistribution and use in source and binary forms of all code within
35 35 * this file that is exclusively owned by LSI, with or without
36 36 * modification, is permitted provided that, in addition to the CDDL 1.0
37 37 * License requirements, the following conditions are met:
38 38 *
39 39 * Neither the name of the author nor the names of its contributors may be
40 40 * used to endorse or promote products derived from this software without
41 41 * specific prior written permission.
42 42 *
43 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
46 46 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
47 47 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
48 48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
49 49 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
50 50 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
51 51 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
52 52 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 54 * DAMAGE.
55 55 */
56 56
57 57 /*
58 58 * mptsas - This is a driver based on LSI Logic's MPT2.0/2.5 interface.
59 59 *
60 60 */
61 61
62 62 #if defined(lint) || defined(DEBUG)
63 63 #define MPTSAS_DEBUG
64 64 #endif
65 65
66 66 /*
67 67 * standard header files.
68 68 */
69 69 #include <sys/note.h>
70 70 #include <sys/scsi/scsi.h>
71 71 #include <sys/pci.h>
72 72 #include <sys/file.h>
73 73 #include <sys/policy.h>
74 74 #include <sys/model.h>
75 75 #include <sys/sysevent.h>
76 76 #include <sys/sysevent/eventdefs.h>
77 77 #include <sys/sysevent/dr.h>
78 78 #include <sys/sata/sata_defs.h>
79 79 #include <sys/scsi/generic/sas.h>
80 80 #include <sys/scsi/impl/scsi_sas.h>
81 81
82 82 #pragma pack(1)
83 83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
84 84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
85 85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
86 86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
87 87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
88 88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
89 89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
90 90 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
91 91 #pragma pack()
92 92
93 93 /*
94 94 * private header files.
95 95 *
96 96 */
97 97 #include <sys/scsi/impl/scsi_reset_notify.h>
98 98 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
99 99 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
100 100 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
101 101 #include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
102 102 #include <sys/raidioctl.h>
103 103
104 104 #include <sys/fs/dv_node.h> /* devfs_clean */
105 105
106 106 /*
107 107 * FMA header files
108 108 */
109 109 #include <sys/ddifm.h>
110 110 #include <sys/fm/protocol.h>
111 111 #include <sys/fm/util.h>
112 112 #include <sys/fm/io/ddi.h>
113 113
114 114 /*
115 115 * autoconfiguration data and routines.
116 116 */
117 117 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
118 118 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
119 119 static int mptsas_power(dev_info_t *dip, int component, int level);
120 120
121 121 /*
122 122 * cb_ops function
123 123 */
124 124 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
125 125 cred_t *credp, int *rval);
126 126 #ifdef __sparc
127 127 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
128 128 #else /* __sparc */
129 129 static int mptsas_quiesce(dev_info_t *devi);
130 130 #endif /* __sparc */
131 131
132 132 /*
133 133 * Resource initilaization for hardware
134 134 */
135 135 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
136 136 static void mptsas_disable_bus_master(mptsas_t *mpt);
137 137 static void mptsas_hba_fini(mptsas_t *mpt);
138 138 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
139 139 static int mptsas_hba_setup(mptsas_t *mpt);
140 140 static void mptsas_hba_teardown(mptsas_t *mpt);
141 141 static int mptsas_config_space_init(mptsas_t *mpt);
142 142 static void mptsas_config_space_fini(mptsas_t *mpt);
143 143 static void mptsas_iport_register(mptsas_t *mpt);
144 144 static int mptsas_smp_setup(mptsas_t *mpt);
145 145 static void mptsas_smp_teardown(mptsas_t *mpt);
146 146 static int mptsas_cache_create(mptsas_t *mpt);
147 147 static void mptsas_cache_destroy(mptsas_t *mpt);
148 148 static int mptsas_alloc_request_frames(mptsas_t *mpt);
149 149 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
150 150 static int mptsas_alloc_free_queue(mptsas_t *mpt);
151 151 static int mptsas_alloc_post_queue(mptsas_t *mpt);
152 152 static void mptsas_alloc_reply_args(mptsas_t *mpt);
153 153 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
154 154 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
155 155 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
156 156
157 157 /*
158 158 * SCSA function prototypes
159 159 */
160 160 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
161 161 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
162 162 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
163 163 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
164 164 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
165 165 int tgtonly);
166 166 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
167 167 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
168 168 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
169 169 int tgtlen, int flags, int (*callback)(), caddr_t arg);
170 170 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
171 171 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
172 172 struct scsi_pkt *pkt);
173 173 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
174 174 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
175 175 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
176 176 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
177 177 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
178 178 void (*callback)(caddr_t), caddr_t arg);
179 179 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
180 180 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
181 181 static int mptsas_scsi_quiesce(dev_info_t *dip);
182 182 static int mptsas_scsi_unquiesce(dev_info_t *dip);
183 183 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
184 184 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
185 185
186 186 /*
187 187 * SMP functions
188 188 */
189 189 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
190 190
191 191 /*
192 192 * internal function prototypes.
193 193 */
194 194 static void mptsas_list_add(mptsas_t *mpt);
195 195 static void mptsas_list_del(mptsas_t *mpt);
196 196
197 197 static int mptsas_quiesce_bus(mptsas_t *mpt);
198 198 static int mptsas_unquiesce_bus(mptsas_t *mpt);
199 199
200 200 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
201 201 static void mptsas_free_handshake_msg(mptsas_t *mpt);
202 202
203 203 static void mptsas_ncmds_checkdrain(void *arg);
204 204
205 205 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
206 206 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
207 207 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
208 208 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
209 209
210 210 static int mptsas_do_detach(dev_info_t *dev);
211 211 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
212 212 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
213 213 struct scsi_pkt *pkt);
214 214 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
215 215
216 216 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
217 217 static void mptsas_handle_event(void *args);
218 218 static int mptsas_handle_event_sync(void *args);
219 219 static void mptsas_handle_dr(void *args);
220 220 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
221 221 dev_info_t *pdip);
222 222
223 223 static void mptsas_restart_cmd(void *);
224 224
225 225 static void mptsas_flush_hba(mptsas_t *mpt);
226 226 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
227 227 uint8_t tasktype);
228 228 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
229 229 uchar_t reason, uint_t stat);
230 230
231 231 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
232 232 static void mptsas_process_intr(mptsas_t *mpt,
233 233 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
234 234 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
235 235 pMpi2ReplyDescriptorsUnion_t reply_desc);
236 236 static void mptsas_handle_address_reply(mptsas_t *mpt,
237 237 pMpi2ReplyDescriptorsUnion_t reply_desc);
238 238 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
239 239 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
240 240 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
241 241
242 242 static void mptsas_watch(void *arg);
243 243 static void mptsas_watchsubr(mptsas_t *mpt);
244 244 static void mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt);
245 245
246 246 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
247 247 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
248 248 uint8_t *data, uint32_t request_size, uint32_t reply_size,
249 249 uint32_t data_size, uint32_t direction, uint8_t *dataout,
250 250 uint32_t dataout_size, short timeout, int mode);
251 251 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
252 252
253 253 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
254 254 uint32_t unique_id);
255 255 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
256 256 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
257 257 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
258 258 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
259 259 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
260 260 uint32_t diag_type);
261 261 static int mptsas_diag_register(mptsas_t *mpt,
262 262 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
263 263 static int mptsas_diag_unregister(mptsas_t *mpt,
264 264 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
265 265 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
266 266 uint32_t *return_code);
267 267 static int mptsas_diag_read_buffer(mptsas_t *mpt,
268 268 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
269 269 uint32_t *return_code, int ioctl_mode);
270 270 static int mptsas_diag_release(mptsas_t *mpt,
271 271 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
272 272 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
273 273 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
274 274 int ioctl_mode);
275 275 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
276 276 int mode);
277 277
278 278 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
279 279 int cmdlen, int tgtlen, int statuslen, int kf);
280 280 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
281 281
282 282 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
283 283 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
284 284
285 285 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
286 286 int kmflags);
287 287 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
288 288
289 289 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
290 290 mptsas_cmd_t *cmd);
291 291 static void mptsas_check_task_mgt(mptsas_t *mpt,
292 292 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
293 293 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
294 294 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
295 295 int *resid);
296 296
297 297 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
298 298 static void mptsas_free_active_slots(mptsas_t *mpt);
299 299 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
300 300
301 301 static void mptsas_restart_hba(mptsas_t *mpt);
302 302 static void mptsas_restart_waitq(mptsas_t *mpt);
303 303
304 304 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
305 305 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
306 306 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
307 307
308 308 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
309 309 static void mptsas_doneq_empty(mptsas_t *mpt);
310 310 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
311 311
312 312 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
313 313 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
314 314 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
315 315 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
316 316
317 317
318 318 static void mptsas_start_watch_reset_delay();
319 319 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
320 320 static void mptsas_watch_reset_delay(void *arg);
321 321 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
322 322
323 323 /*
324 324 * helper functions
325 325 */
326 326 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
327 327
328 328 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
329 329 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
330 330 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
331 331 int lun);
332 332 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
333 333 int lun);
334 334 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
335 335 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
336 336
337 337 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
338 338 int *lun);
339 339 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
340 340
341 341 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
342 342 mptsas_phymask_t phymask, uint8_t phy);
343 343 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
344 344 mptsas_phymask_t phymask, uint64_t wwid);
345 345 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
346 346 mptsas_phymask_t phymask, uint64_t wwid);
347 347
348 348 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
349 349 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
350 350
351 351 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
352 352 uint16_t *handle, mptsas_target_t **pptgt);
353 353 static void mptsas_update_phymask(mptsas_t *mpt);
354 354
355 355 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
356 356 uint32_t *status, uint8_t cmd);
357 357 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
358 358 mptsas_phymask_t *phymask);
359 359 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
360 360 mptsas_phymask_t phymask);
361 361 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt);
362 362
363 363
364 364 /*
365 365 * Enumeration / DR functions
366 366 */
367 367 static void mptsas_config_all(dev_info_t *pdip);
368 368 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
369 369 dev_info_t **lundip);
370 370 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
371 371 dev_info_t **lundip);
372 372
373 373 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
374 374 static int mptsas_offline_target(dev_info_t *pdip, char *name);
375 375
376 376 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
377 377 dev_info_t **dip);
378 378
379 379 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
380 380 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
381 381 dev_info_t **dip, mptsas_target_t *ptgt);
382 382
383 383 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
384 384 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
385 385
386 386 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
387 387 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
388 388 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
389 389 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
390 390 int lun);
391 391
392 392 static void mptsas_offline_missed_luns(dev_info_t *pdip,
393 393 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
394 394 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
395 395 mdi_pathinfo_t *rpip, uint_t flags);
396 396
397 397 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
398 398 dev_info_t **smp_dip);
399 399 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
400 400 uint_t flags);
401 401
402 402 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
403 403 int mode, int *rval);
404 404 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
405 405 int mode, int *rval);
406 406 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
407 407 int mode, int *rval);
408 408 static void mptsas_record_event(void *args);
409 409 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
410 410 int mode);
411 411
412 412 mptsas_target_t *mptsas_tgt_alloc(mptsas_t *, uint16_t, uint64_t,
413 413 uint32_t, mptsas_phymask_t, uint8_t);
414 414 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
415 415 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
416 416 dev_info_t **smp_dip);
417 417
418 418 /*
419 419 * Power management functions
420 420 */
421 421 static int mptsas_get_pci_cap(mptsas_t *mpt);
422 422 static int mptsas_init_pm(mptsas_t *mpt);
423 423
424 424 /*
425 425 * MPT MSI tunable:
426 426 *
427 427 * By default MSI is enabled on all supported platforms.
428 428 */
429 429 boolean_t mptsas_enable_msi = B_TRUE;
430 430 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
431 431
432 432 /*
433 433 * Global switch for use of MPI2.5 FAST PATH.
434 434 * We don't really know what FAST PATH actually does, so if it is suspected
435 435 * to cause problems it can be turned off by setting this variable to B_FALSE.
436 436 */
437 437 boolean_t mptsas_use_fastpath = B_TRUE;
438 438
439 439 static int mptsas_register_intrs(mptsas_t *);
440 440 static void mptsas_unregister_intrs(mptsas_t *);
441 441 static int mptsas_add_intrs(mptsas_t *, int);
442 442 static void mptsas_rem_intrs(mptsas_t *);
443 443
444 444 /*
445 445 * FMA Prototypes
446 446 */
447 447 static void mptsas_fm_init(mptsas_t *mpt);
448 448 static void mptsas_fm_fini(mptsas_t *mpt);
449 449 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
450 450
451 451 extern pri_t minclsyspri, maxclsyspri;
452 452
453 453 /*
454 454 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
455 455 * under this device that the paths to a physical device are created when
456 456 * MPxIO is used.
457 457 */
458 458 extern dev_info_t *scsi_vhci_dip;
459 459
460 460 /*
461 461 * Tunable timeout value for Inquiry VPD page 0x83
462 462 * By default the value is 30 seconds.
463 463 */
464 464 int mptsas_inq83_retry_timeout = 30;
465 465
466 466 /*
467 467 * This is used to allocate memory for message frame storage, not for
468 468 * data I/O DMA. All message frames must be stored in the first 4G of
469 469 * physical memory.
470 470 */
/*
 * DMA attributes for message-frame storage (requests, replies, queues).
 * The high address limit is 32 bits because all message frames must be
 * stored in the first 4G of physical memory; not used for data I/O DMA.
 */
ddi_dma_attr_t mptsas_dma_attrs = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,		/* address low - should be 0 (longlong) */
	0xffffffffull,	/* address high - 32-bit max range */
	0x00ffffffull,	/* count max - max DMA object size */
	4,		/* allocation alignment requirements */
	0x78,		/* burstsizes - binary encoded values */
	1,		/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,		/* granularity - device transfer size */
	0		/* flags, set to 0 */
};

/*
 * DMA attributes for data I/O buffers.  Identical to mptsas_dma_attrs
 * except that the full 64-bit physical address range is allowed and
 * relaxed ordering is enabled for throughput.
 */
ddi_dma_attr_t mptsas_dma_attrs64 = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,		/* address low - should be 0 (longlong) */
	0xffffffffffffffffull,	/* address high - 64-bit max */
	0x00ffffffull,	/* count max - max DMA object size */
	4,		/* allocation alignment requirements */
	0x78,		/* burstsizes - binary encoded values */
	1,		/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,		/* granularity - device transfer size */
	DDI_DMA_RELAXED_ORDERING	/* flags, enable relaxed ordering */
};
504 504
/*
 * Register access attributes: the MPT chip's registers are little-endian
 * and accesses must not be reordered.
 */
ddi_device_acc_attr_t mptsas_dev_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * Character/block entry points.  Only open/close/ioctl are meaningful
 * for an HBA driver; everything else is nodev/nochpoll.
 */
static struct cb_ops mptsas_cb_ops = {
	scsi_hba_open,		/* open */
	scsi_hba_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mptsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* streamtab */
	D_MP,			/* cb_flag */
	CB_REV,			/* rev */
	nodev,			/* aread */
	nodev			/* awrite */
};

/*
 * Device operations.  On SPARC a reset entry point is provided and
 * fast-reboot quiesce is not needed; on other platforms it is the
 * reverse (mptsas_quiesce supports fast reboot).
 */
static struct dev_ops mptsas_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mptsas_attach,		/* attach */
	mptsas_detach,		/* detach */
#ifdef  __sparc
	mptsas_reset,
#else
	nodev,			/* reset */
#endif  /* __sparc */
	&mptsas_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	mptsas_power,		/* power management */
#ifdef	__sparc
	ddi_quiesce_not_needed
#else
	mptsas_quiesce		/* quiesce */
#endif	/* __sparc */
};


#define	MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"

/* Module linkage: this module is a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. This one is a driver */
	MPTSAS_MOD_STRING, /* Name of the module. */
	&mptsas_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
568 568 #define TARGET_PROP "target"
569 569 #define LUN_PROP "lun"
570 570 #define LUN64_PROP "lun64"
571 571 #define SAS_PROP "sas-mpt"
572 572 #define MDI_GUID "wwn"
573 573 #define NDI_GUID "guid"
574 574 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
575 575
576 576 /*
577 577 * Local static data
578 578 */
579 579 #if defined(MPTSAS_DEBUG)
580 580 /*
581 581 * Flags to indicate which debug messages are to be printed and which go to the
582 582 * debug log ring buffer. Default is to not print anything, and to log
583 583 * everything except the watchsubr() output which normally happens every second.
584 584 */
585 585 uint32_t mptsas_debugprt_flags = 0x0;
586 586 uint32_t mptsas_debuglog_flags = ~(1U << 30);
587 587 #endif /* defined(MPTSAS_DEBUG) */
588 588 uint32_t mptsas_debug_resets = 0;
589 589
590 590 static kmutex_t mptsas_global_mutex;
591 591 static void *mptsas_state; /* soft state ptr */
592 592 static krwlock_t mptsas_global_rwlock;
593 593
594 594 static kmutex_t mptsas_log_mutex;
595 595 static char mptsas_log_buf[256];
596 596 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
597 597
598 598 static mptsas_t *mptsas_head, *mptsas_tail;
599 599 static clock_t mptsas_scsi_watchdog_tick;
600 600 static clock_t mptsas_tick;
601 601 static timeout_id_t mptsas_reset_watch;
602 602 static timeout_id_t mptsas_timeout_id;
603 603 static int mptsas_timeouts_enabled = 0;
604 604 /*
605 605 * warlock directives
606 606 */
607 607 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
608 608 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
609 609 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
610 610 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
611 611 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
612 612 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
613 613
614 614 /*
615 615 * SM - HBA statics
616 616 */
617 617 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
618 618
619 619 #ifdef MPTSAS_DEBUG
620 620 void debug_enter(char *);
621 621 #endif
622 622
623 623 /*
624 624 * Notes:
625 625 * - scsi_hba_init(9F) initializes SCSI HBA modules
626 626 * - must call scsi_hba_fini(9F) if modload() fails
627 627 */
/*
 * _init(9E): loadable module entry point.
 *
 * Initializes global driver state before installing the module:
 * the per-instance soft-state array, the SCSA HBA framework
 * (scsi_hba_init), and the three global locks.  If mod_install()
 * fails, everything set up above it is torn down again so a failed
 * load leaves no residue.
 *
 * Returns 0 on success, otherwise the error from the failing step.
 */
int
_init(void)
{
	int status;
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	NDBG0(("_init"));

	/* Soft-state array: one MPTSAS_SIZE-byte slot per HBA instance. */
	status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
	    MPTSAS_INITIAL_SOFT_SPACE);
	if (status != 0) {
		return (status);
	}

	/* Register with the SCSA framework; must precede mod_install(). */
	if ((status = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&mptsas_state);
		return (status);
	}

	mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
	rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
	mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);

	if ((status = mod_install(&modlinkage)) != 0) {
		/* Installation failed: undo all of the setup above. */
		mutex_destroy(&mptsas_log_mutex);
		rw_destroy(&mptsas_global_rwlock);
		mutex_destroy(&mptsas_global_mutex);
		ddi_soft_state_fini(&mptsas_state);
		scsi_hba_fini(&modlinkage);
	}

	return (status);
}
662 662
663 663 /*
664 664 * Notes:
665 665 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
666 666 */
/*
 * _fini(9E): loadable module removal.
 *
 * Global teardown happens only if mod_remove() succeeds (i.e. no
 * instances remain attached); otherwise all state is left intact and
 * the mod_remove() error is returned.
 */
int
_fini(void)
{
	int	status;
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	NDBG0(("_fini"));

	if ((status = mod_remove(&modlinkage)) == 0) {
		ddi_soft_state_fini(&mptsas_state);
		scsi_hba_fini(&modlinkage);
		mutex_destroy(&mptsas_global_mutex);
		rw_destroy(&mptsas_global_rwlock);
		mutex_destroy(&mptsas_log_mutex);
	}
	return (status);
}
685 685
686 686 /*
687 687 * The loadable-module _info(9E) entry point
688 688 */
/*
 * _info(9E): report module information via mod_info().
 */
int
_info(struct modinfo *modinfop)
{
	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);
	NDBG0(("mptsas _info"));

	return (mod_info(&modlinkage, modinfop));
}
698 698
699 699 static int
700 700 mptsas_target_eval_devhdl(const void *op, void *arg)
701 701 {
702 702 uint16_t dh = *(uint16_t *)arg;
703 703 const mptsas_target_t *tp = op;
704 704
705 705 return ((int)tp->m_devhdl - (int)dh);
706 706 }
707 707
708 708 static int
709 709 mptsas_target_eval_slot(const void *op, void *arg)
710 710 {
711 711 mptsas_led_control_t *lcp = arg;
712 712 const mptsas_target_t *tp = op;
713 713
714 714 if (tp->m_enclosure != lcp->Enclosure)
715 715 return ((int)tp->m_enclosure - (int)lcp->Enclosure);
716 716
717 717 return ((int)tp->m_slot_num - (int)lcp->Slot);
718 718 }
719 719
720 720 static int
721 721 mptsas_target_eval_nowwn(const void *op, void *arg)
722 722 {
723 723 uint8_t phy = *(uint8_t *)arg;
724 724 const mptsas_target_t *tp = op;
725 725
726 726 if (tp->m_addr.mta_wwn != 0)
727 727 return (-1);
728 728
729 729 return ((int)tp->m_phynum - (int)phy);
730 730 }
731 731
732 732 static int
733 733 mptsas_smp_eval_devhdl(const void *op, void *arg)
734 734 {
735 735 uint16_t dh = *(uint16_t *)arg;
736 736 const mptsas_smp_t *sp = op;
737 737
738 738 return ((int)sp->m_devhdl - (int)dh);
739 739 }
740 740
741 741 static uint64_t
742 742 mptsas_target_addr_hash(const void *tp)
743 743 {
744 744 const mptsas_target_addr_t *tap = tp;
745 745
746 746 return ((tap->mta_wwn & 0xffffffffffffULL) |
747 747 ((uint64_t)tap->mta_phymask << 48));
748 748 }
749 749
750 750 static int
751 751 mptsas_target_addr_cmp(const void *a, const void *b)
752 752 {
753 753 const mptsas_target_addr_t *aap = a;
754 754 const mptsas_target_addr_t *bap = b;
755 755
756 756 if (aap->mta_wwn < bap->mta_wwn)
757 757 return (-1);
758 758 if (aap->mta_wwn > bap->mta_wwn)
759 759 return (1);
760 760 return ((int)bap->mta_phymask - (int)aap->mta_phymask);
761 761 }
762 762
763 763 static void
764 764 mptsas_target_free(void *op)
765 765 {
766 766 kmem_free(op, sizeof (mptsas_target_t));
767 767 }
768 768
769 769 static void
770 770 mptsas_smp_free(void *op)
771 771 {
772 772 kmem_free(op, sizeof (mptsas_smp_t));
773 773 }
774 774
/*
 * Empty and destroy the target and SMP-target refhash tables.
 *
 * Entries are removed while iterating.  NOTE(review): this pattern is
 * only safe if refhash_next() locates and holds the successor before
 * dropping its reference on the current entry (so removal of the
 * current entry cannot invalidate the traversal) -- confirm against
 * the refhash implementation in mptsas_hash.c.
 *
 * The table pointers are cleared afterwards so any stale use of the
 * destroyed hashes is detectable as a NULL dereference rather than a
 * use-after-free.
 */
static void
mptsas_destroy_hashes(mptsas_t *mpt)
{
	mptsas_target_t *tp;
	mptsas_smp_t *sp;

	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		refhash_remove(mpt->m_targets, tp);
	}
	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
	    sp = refhash_next(mpt->m_smp_targets, sp)) {
		refhash_remove(mpt->m_smp_targets, sp);
	}
	refhash_destroy(mpt->m_targets);
	refhash_destroy(mpt->m_smp_targets);
	mpt->m_targets = NULL;
	mpt->m_smp_targets = NULL;
}
794 794
795 795 static int
796 796 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
797 797 {
798 798 dev_info_t *pdip;
799 799 mptsas_t *mpt;
800 800 scsi_hba_tran_t *hba_tran;
801 801 char *iport = NULL;
802 802 char phymask[MPTSAS_MAX_PHYS];
803 803 mptsas_phymask_t phy_mask = 0;
804 804 int dynamic_port = 0;
805 805 uint32_t page_address;
806 806 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
807 807 int rval = DDI_FAILURE;
808 808 int i = 0;
809 809 uint8_t numphys = 0;
810 810 uint8_t phy_id;
811 811 uint8_t phy_port = 0;
812 812 uint16_t attached_devhdl = 0;
813 813 uint32_t dev_info;
814 814 uint64_t attached_sas_wwn;
815 815 uint16_t dev_hdl;
816 816 uint16_t pdev_hdl;
817 817 uint16_t bay_num, enclosure, io_flags;
818 818 char attached_wwnstr[MPTSAS_WWN_STRLEN];
819 819
820 820 /* CONSTCOND */
821 821 ASSERT(NO_COMPETING_THREADS);
822 822
823 823 switch (cmd) {
824 824 case DDI_ATTACH:
825 825 break;
826 826
827 827 case DDI_RESUME:
828 828 /*
829 829 * If this a scsi-iport node, nothing to do here.
830 830 */
831 831 return (DDI_SUCCESS);
832 832
833 833 default:
834 834 return (DDI_FAILURE);
835 835 }
836 836
837 837 pdip = ddi_get_parent(dip);
838 838
839 839 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
840 840 NULL) {
841 841 cmn_err(CE_WARN, "Failed attach iport because fail to "
842 842 "get tran vector for the HBA node");
843 843 return (DDI_FAILURE);
844 844 }
845 845
846 846 mpt = TRAN2MPT(hba_tran);
847 847 ASSERT(mpt != NULL);
848 848 if (mpt == NULL)
849 849 return (DDI_FAILURE);
850 850
851 851 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
852 852 NULL) {
853 853 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
854 854 "get tran vector for the iport node");
855 855 return (DDI_FAILURE);
856 856 }
857 857
858 858 /*
859 859 * Overwrite parent's tran_hba_private to iport's tran vector
860 860 */
861 861 hba_tran->tran_hba_private = mpt;
862 862
863 863 ddi_report_dev(dip);
864 864
865 865 /*
866 866 * Get SAS address for initiator port according dev_handle
867 867 */
868 868 iport = ddi_get_name_addr(dip);
869 869 if (iport && strncmp(iport, "v0", 2) == 0) {
870 870 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
871 871 MPTSAS_VIRTUAL_PORT, 1) !=
872 872 DDI_PROP_SUCCESS) {
873 873 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
874 874 MPTSAS_VIRTUAL_PORT);
875 875 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
876 876 "prop update failed");
877 877 return (DDI_FAILURE);
878 878 }
879 879 return (DDI_SUCCESS);
880 880 }
881 881
882 882 mutex_enter(&mpt->m_mutex);
883 883 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
884 884 bzero(phymask, sizeof (phymask));
885 885 (void) sprintf(phymask,
886 886 "%x", mpt->m_phy_info[i].phy_mask);
887 887 if (strcmp(phymask, iport) == 0) {
888 888 break;
889 889 }
890 890 }
891 891
892 892 if (i == MPTSAS_MAX_PHYS) {
893 893 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
894 894 "seems not exist", iport);
895 895 mutex_exit(&mpt->m_mutex);
896 896 return (DDI_FAILURE);
897 897 }
898 898
899 899 phy_mask = mpt->m_phy_info[i].phy_mask;
900 900
901 901 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
902 902 dynamic_port = 1;
903 903 else
904 904 dynamic_port = 0;
905 905
906 906 /*
907 907 * Update PHY info for smhba
908 908 */
909 909 if (mptsas_smhba_phy_init(mpt)) {
910 910 mutex_exit(&mpt->m_mutex);
911 911 mptsas_log(mpt, CE_WARN, "mptsas phy update "
912 912 "failed");
913 913 return (DDI_FAILURE);
914 914 }
915 915
916 916 mutex_exit(&mpt->m_mutex);
917 917
918 918 numphys = 0;
919 919 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
920 920 if ((phy_mask >> i) & 0x01) {
921 921 numphys++;
922 922 }
923 923 }
924 924
925 925 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
926 926 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
927 927 mpt->un.m_base_wwid);
928 928
929 929 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
930 930 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
931 931 DDI_PROP_SUCCESS) {
932 932 (void) ddi_prop_remove(DDI_DEV_T_NONE,
933 933 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
934 934 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
935 935 "prop update failed");
936 936 return (DDI_FAILURE);
937 937 }
938 938 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
939 939 MPTSAS_NUM_PHYS, numphys) !=
940 940 DDI_PROP_SUCCESS) {
941 941 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
942 942 return (DDI_FAILURE);
943 943 }
944 944
945 945 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
946 946 "phymask", phy_mask) !=
947 947 DDI_PROP_SUCCESS) {
948 948 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
949 949 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
950 950 "prop update failed");
951 951 return (DDI_FAILURE);
952 952 }
953 953
954 954 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
955 955 "dynamic-port", dynamic_port) !=
956 956 DDI_PROP_SUCCESS) {
957 957 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
958 958 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
959 959 "prop update failed");
960 960 return (DDI_FAILURE);
961 961 }
962 962 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
963 963 MPTSAS_VIRTUAL_PORT, 0) !=
964 964 DDI_PROP_SUCCESS) {
965 965 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
966 966 MPTSAS_VIRTUAL_PORT);
967 967 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
968 968 "prop update failed");
969 969 return (DDI_FAILURE);
970 970 }
971 971 mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
972 972 &attached_devhdl);
973 973
974 974 mutex_enter(&mpt->m_mutex);
975 975 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
976 976 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
977 977 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
978 978 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
979 979 &pdev_hdl, &bay_num, &enclosure, &io_flags);
980 980 if (rval != DDI_SUCCESS) {
981 981 mptsas_log(mpt, CE_WARN,
982 982 "Failed to get device page0 for handle:%d",
983 983 attached_devhdl);
984 984 mutex_exit(&mpt->m_mutex);
985 985 return (DDI_FAILURE);
986 986 }
987 987
988 988 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
989 989 bzero(phymask, sizeof (phymask));
990 990 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
991 991 if (strcmp(phymask, iport) == 0) {
992 992 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
993 993 "%x",
994 994 mpt->m_phy_info[i].phy_mask);
995 995 }
996 996 }
997 997 mutex_exit(&mpt->m_mutex);
998 998
999 999 bzero(attached_wwnstr, sizeof (attached_wwnstr));
1000 1000 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
1001 1001 attached_sas_wwn);
1002 1002 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
1003 1003 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
1004 1004 DDI_PROP_SUCCESS) {
1005 1005 (void) ddi_prop_remove(DDI_DEV_T_NONE,
1006 1006 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
1007 1007 return (DDI_FAILURE);
1008 1008 }
1009 1009
1010 1010 /* Create kstats for each phy on this iport */
1011 1011
1012 1012 mptsas_create_phy_stats(mpt, iport, dip);
1013 1013
1014 1014 /*
1015 1015 * register sas hba iport with mdi (MPxIO/vhci)
1016 1016 */
1017 1017 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
1018 1018 dip, 0) == MDI_SUCCESS) {
1019 1019 mpt->m_mpxio_enable = TRUE;
1020 1020 }
1021 1021 return (DDI_SUCCESS);
1022 1022 }
1023 1023
1024 1024 /*
1025 1025 * Notes:
1026 1026 * Set up all device state and allocate data structures,
1027 1027 * mutexes, condition variables, etc. for device operation.
1028 1028 * Add interrupts needed.
1029 1029 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1030 1030 */
1031 1031 static int
1032 1032 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1033 1033 {
1034 1034 mptsas_t *mpt = NULL;
1035 1035 int instance, i, j;
1036 1036 int doneq_thread_num;
1037 1037 char intr_added = 0;
1038 1038 char map_setup = 0;
1039 1039 char config_setup = 0;
1040 1040 char hba_attach_setup = 0;
1041 1041 char smp_attach_setup = 0;
1042 1042 char mutex_init_done = 0;
1043 1043 char event_taskq_create = 0;
1044 1044 char dr_taskq_create = 0;
1045 1045 char doneq_thread_create = 0;
1046 1046 char added_watchdog = 0;
1047 1047 scsi_hba_tran_t *hba_tran;
1048 1048 uint_t mem_bar = MEM_SPACE;
1049 1049 int rval = DDI_FAILURE;
1050 1050
1051 1051 /* CONSTCOND */
1052 1052 ASSERT(NO_COMPETING_THREADS);
1053 1053
1054 1054 if (scsi_hba_iport_unit_address(dip)) {
1055 1055 return (mptsas_iport_attach(dip, cmd));
1056 1056 }
1057 1057
1058 1058 switch (cmd) {
1059 1059 case DDI_ATTACH:
1060 1060 break;
1061 1061
1062 1062 case DDI_RESUME:
1063 1063 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
1064 1064 return (DDI_FAILURE);
1065 1065
1066 1066 mpt = TRAN2MPT(hba_tran);
1067 1067
1068 1068 if (!mpt) {
1069 1069 return (DDI_FAILURE);
1070 1070 }
1071 1071
1072 1072 /*
1073 1073 * Reset hardware and softc to "no outstanding commands"
1074 1074 * Note that a check condition can result on first command
1075 1075 * to a target.
1076 1076 */
1077 1077 mutex_enter(&mpt->m_mutex);
1078 1078
1079 1079 /*
1080 1080 * raise power.
1081 1081 */
1082 1082 if (mpt->m_options & MPTSAS_OPT_PM) {
1083 1083 mutex_exit(&mpt->m_mutex);
1084 1084 (void) pm_busy_component(dip, 0);
1085 1085 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
1086 1086 if (rval == DDI_SUCCESS) {
1087 1087 mutex_enter(&mpt->m_mutex);
1088 1088 } else {
1089 1089 /*
1090 1090 * The pm_raise_power() call above failed,
1091 1091 * and that can only occur if we were unable
1092 1092 * to reset the hardware. This is probably
1093 1093 * due to unhealty hardware, and because
1094 1094 * important filesystems(such as the root
1095 1095 * filesystem) could be on the attached disks,
1096 1096 * it would not be a good idea to continue,
1097 1097 * as we won't be entirely certain we are
1098 1098 * writing correct data. So we panic() here
1099 1099 * to not only prevent possible data corruption,
1100 1100 * but to give developers or end users a hope
1101 1101 * of identifying and correcting any problems.
1102 1102 */
1103 1103 fm_panic("mptsas could not reset hardware "
1104 1104 "during resume");
1105 1105 }
1106 1106 }
1107 1107
1108 1108 mpt->m_suspended = 0;
1109 1109
1110 1110 /*
1111 1111 * Reinitialize ioc
1112 1112 */
1113 1113 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1114 1114 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1115 1115 mutex_exit(&mpt->m_mutex);
1116 1116 if (mpt->m_options & MPTSAS_OPT_PM) {
1117 1117 (void) pm_idle_component(dip, 0);
1118 1118 }
1119 1119 fm_panic("mptsas init chip fail during resume");
1120 1120 }
1121 1121 /*
1122 1122 * mptsas_update_driver_data needs interrupts so enable them
1123 1123 * first.
1124 1124 */
1125 1125 MPTSAS_ENABLE_INTR(mpt);
1126 1126 mptsas_update_driver_data(mpt);
1127 1127
1128 1128 /* start requests, if possible */
1129 1129 mptsas_restart_hba(mpt);
1130 1130
1131 1131 mutex_exit(&mpt->m_mutex);
1132 1132
1133 1133 /*
1134 1134 * Restart watch thread
1135 1135 */
1136 1136 mutex_enter(&mptsas_global_mutex);
1137 1137 if (mptsas_timeout_id == 0) {
1138 1138 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1139 1139 mptsas_tick);
1140 1140 mptsas_timeouts_enabled = 1;
1141 1141 }
1142 1142 mutex_exit(&mptsas_global_mutex);
1143 1143
1144 1144 /* report idle status to pm framework */
1145 1145 if (mpt->m_options & MPTSAS_OPT_PM) {
1146 1146 (void) pm_idle_component(dip, 0);
1147 1147 }
1148 1148
1149 1149 return (DDI_SUCCESS);
1150 1150
1151 1151 default:
1152 1152 return (DDI_FAILURE);
1153 1153
1154 1154 }
1155 1155
1156 1156 instance = ddi_get_instance(dip);
1157 1157
1158 1158 /*
1159 1159 * Allocate softc information.
1160 1160 */
1161 1161 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1162 1162 mptsas_log(NULL, CE_WARN,
1163 1163 "mptsas%d: cannot allocate soft state", instance);
1164 1164 goto fail;
1165 1165 }
1166 1166
1167 1167 mpt = ddi_get_soft_state(mptsas_state, instance);
1168 1168
1169 1169 if (mpt == NULL) {
1170 1170 mptsas_log(NULL, CE_WARN,
1171 1171 "mptsas%d: cannot get soft state", instance);
1172 1172 goto fail;
1173 1173 }
1174 1174
1175 1175 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1176 1176 scsi_size_clean(dip);
1177 1177
1178 1178 mpt->m_dip = dip;
1179 1179 mpt->m_instance = instance;
1180 1180
1181 1181 /* Make a per-instance copy of the structures */
1182 1182 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1183 1183 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1184 1184 mpt->m_reg_acc_attr = mptsas_dev_attr;
1185 1185 mpt->m_dev_acc_attr = mptsas_dev_attr;
1186 1186
1187 1187 /*
1188 1188 * Initialize FMA
1189 1189 */
1190 1190 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1191 1191 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1192 1192 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1193 1193 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1194 1194
1195 1195 mptsas_fm_init(mpt);
1196 1196
1197 1197 if (mptsas_alloc_handshake_msg(mpt,
1198 1198 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1199 1199 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1200 1200 goto fail;
1201 1201 }
1202 1202
1203 1203 /*
1204 1204 * Setup configuration space
1205 1205 */
1206 1206 if (mptsas_config_space_init(mpt) == FALSE) {
1207 1207 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1208 1208 goto fail;
1209 1209 }
1210 1210 config_setup++;
1211 1211
1212 1212 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1213 1213 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1214 1214 mptsas_log(mpt, CE_WARN, "map setup failed");
1215 1215 goto fail;
1216 1216 }
1217 1217 map_setup++;
1218 1218
1219 1219 /*
1220 1220 * A taskq is created for dealing with the event handler
1221 1221 */
1222 1222 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1223 1223 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1224 1224 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1225 1225 goto fail;
1226 1226 }
1227 1227 event_taskq_create++;
1228 1228
1229 1229 /*
1230 1230 * A taskq is created for dealing with dr events
1231 1231 */
1232 1232 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1233 1233 "mptsas_dr_taskq",
1234 1234 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1235 1235 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1236 1236 "failed");
1237 1237 goto fail;
1238 1238 }
1239 1239 dr_taskq_create++;
1240 1240
1241 1241 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1242 1242 0, "mptsas_doneq_thread_threshold_prop", 10);
1243 1243 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1244 1244 0, "mptsas_doneq_length_threshold_prop", 8);
1245 1245 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1246 1246 0, "mptsas_doneq_thread_n_prop", 8);
1247 1247
1248 1248 if (mpt->m_doneq_thread_n) {
1249 1249 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1250 1250 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1251 1251
1252 1252 mutex_enter(&mpt->m_doneq_mutex);
1253 1253 mpt->m_doneq_thread_id =
1254 1254 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1255 1255 * mpt->m_doneq_thread_n, KM_SLEEP);
1256 1256
1257 1257 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1258 1258 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1259 1259 CV_DRIVER, NULL);
1260 1260 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1261 1261 MUTEX_DRIVER, NULL);
1262 1262 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1263 1263 mpt->m_doneq_thread_id[j].flag |=
1264 1264 MPTSAS_DONEQ_THREAD_ACTIVE;
1265 1265 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1266 1266 mpt->m_doneq_thread_id[j].arg.t = j;
1267 1267 mpt->m_doneq_thread_id[j].threadp =
1268 1268 thread_create(NULL, 0, mptsas_doneq_thread,
1269 1269 &mpt->m_doneq_thread_id[j].arg,
1270 1270 0, &p0, TS_RUN, minclsyspri);
1271 1271 mpt->m_doneq_thread_id[j].donetail =
1272 1272 &mpt->m_doneq_thread_id[j].doneq;
1273 1273 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1274 1274 }
1275 1275 mutex_exit(&mpt->m_doneq_mutex);
1276 1276 doneq_thread_create++;
1277 1277 }
1278 1278
1279 1279 /*
1280 1280 * Disable hardware interrupt since we're not ready to
1281 1281 * handle it yet.
1282 1282 */
1283 1283 MPTSAS_DISABLE_INTR(mpt);
1284 1284 if (mptsas_register_intrs(mpt) == FALSE)
1285 1285 goto fail;
1286 1286 intr_added++;
1287 1287
1288 1288 /* Initialize mutex used in interrupt handler */
1289 1289 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1290 1290 DDI_INTR_PRI(mpt->m_intr_pri));
1291 1291 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1292 1292 mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1293 1293 DDI_INTR_PRI(mpt->m_intr_pri));
1294 1294 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1295 1295 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1296 1296 NULL, MUTEX_DRIVER,
1297 1297 DDI_INTR_PRI(mpt->m_intr_pri));
1298 1298 }
1299 1299
1300 1300 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1301 1301 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1302 1302 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1303 1303 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1304 1304 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1305 1305 mutex_init_done++;
1306 1306
1307 1307 mutex_enter(&mpt->m_mutex);
1308 1308 /*
1309 1309 * Initialize power management component
1310 1310 */
1311 1311 if (mpt->m_options & MPTSAS_OPT_PM) {
1312 1312 if (mptsas_init_pm(mpt)) {
1313 1313 mutex_exit(&mpt->m_mutex);
1314 1314 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1315 1315 "failed");
1316 1316 goto fail;
1317 1317 }
1318 1318 }
1319 1319
↓ open down ↓ |
1319 lines elided |
↑ open up ↑ |
1320 1320 /*
1321 1321 * Initialize chip using Message Unit Reset, if allowed
1322 1322 */
1323 1323 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1324 1324 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1325 1325 mutex_exit(&mpt->m_mutex);
1326 1326 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1327 1327 goto fail;
1328 1328 }
1329 1329
1330 + mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
1331 + mptsas_target_addr_hash, mptsas_target_addr_cmp,
1332 + mptsas_target_free, sizeof (mptsas_target_t),
1333 + offsetof(mptsas_target_t, m_link),
1334 + offsetof(mptsas_target_t, m_addr), KM_SLEEP);
1335 +
1330 1336 /*
1331 1337 * Fill in the phy_info structure and get the base WWID
1332 1338 */
1333 1339 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1334 1340 mptsas_log(mpt, CE_WARN,
1335 1341 "mptsas_get_manufacture_page5 failed!");
1336 1342 goto fail;
1337 1343 }
1338 1344
1339 1345 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1340 1346 mptsas_log(mpt, CE_WARN,
1341 1347 "mptsas_get_sas_io_unit_page_hndshk failed!");
1342 1348 goto fail;
1343 1349 }
1344 1350
1345 1351 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1346 1352 mptsas_log(mpt, CE_WARN,
1347 1353 "mptsas_get_manufacture_page0 failed!");
1348 1354 goto fail;
1349 1355 }
1350 1356
1351 1357 mutex_exit(&mpt->m_mutex);
1352 1358
1353 1359 /*
1354 1360 * Register the iport for multiple port HBA
1355 1361 */
1356 1362 mptsas_iport_register(mpt);
1357 1363
1358 1364 /*
1359 1365 * initialize SCSI HBA transport structure
1360 1366 */
1361 1367 if (mptsas_hba_setup(mpt) == FALSE)
1362 1368 goto fail;
1363 1369 hba_attach_setup++;
1364 1370
1365 1371 if (mptsas_smp_setup(mpt) == FALSE)
1366 1372 goto fail;
1367 1373 smp_attach_setup++;
1368 1374
1369 1375 if (mptsas_cache_create(mpt) == FALSE)
1370 1376 goto fail;
1371 1377
1372 1378 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1373 1379 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1374 1380 if (mpt->m_scsi_reset_delay == 0) {
1375 1381 mptsas_log(mpt, CE_NOTE,
1376 1382 "scsi_reset_delay of 0 is not recommended,"
1377 1383 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1378 1384 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1379 1385 }
1380 1386
1381 1387 /*
1382 1388 * Initialize the wait and done FIFO queue
1383 1389 */
1384 1390 mpt->m_donetail = &mpt->m_doneq;
1385 1391 mpt->m_waitqtail = &mpt->m_waitq;
1386 1392 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1387 1393 mpt->m_tx_draining = 0;
1388 1394
1389 1395 /*
1390 1396 * ioc cmd queue initialize
1391 1397 */
1392 1398 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1393 1399 mpt->m_dev_handle = 0xFFFF;
1394 1400
1395 1401 MPTSAS_ENABLE_INTR(mpt);
1396 1402
1397 1403 /*
1398 1404 * enable event notification
1399 1405 */
1400 1406 mutex_enter(&mpt->m_mutex);
1401 1407 if (mptsas_ioc_enable_event_notification(mpt)) {
1402 1408 mutex_exit(&mpt->m_mutex);
1403 1409 goto fail;
1404 1410 }
1405 1411 mutex_exit(&mpt->m_mutex);
1406 1412
1407 1413 /*
1408 1414 * used for mptsas_watch
1409 1415 */
1410 1416 mptsas_list_add(mpt);
1411 1417
1412 1418 mutex_enter(&mptsas_global_mutex);
1413 1419 if (mptsas_timeouts_enabled == 0) {
1414 1420 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1415 1421 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1416 1422
1417 1423 mptsas_tick = mptsas_scsi_watchdog_tick *
1418 1424 drv_usectohz((clock_t)1000000);
1419 1425
1420 1426 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1421 1427 mptsas_timeouts_enabled = 1;
1422 1428 }
1423 1429 mutex_exit(&mptsas_global_mutex);
1424 1430 added_watchdog++;
1425 1431
1426 1432 /*
1427 1433 * Initialize PHY info for smhba.
1428 1434 * This requires watchdog to be enabled otherwise if interrupts
1429 1435 * don't work the system will hang.
1430 1436 */
1431 1437 if (mptsas_smhba_setup(mpt)) {
1432 1438 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1433 1439 "failed");
1434 1440 goto fail;
1435 1441 }
1436 1442
1437 1443 /* Check all dma handles allocated in attach */
1438 1444 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1439 1445 != DDI_SUCCESS) ||
1440 1446 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1441 1447 != DDI_SUCCESS) ||
1442 1448 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1443 1449 != DDI_SUCCESS) ||
1444 1450 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1445 1451 != DDI_SUCCESS) ||
1446 1452 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1447 1453 != DDI_SUCCESS)) {
1448 1454 goto fail;
1449 1455 }
1450 1456
1451 1457 /* Check all acc handles allocated in attach */
1452 1458 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1453 1459 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1454 1460 != DDI_SUCCESS) ||
1455 1461 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1456 1462 != DDI_SUCCESS) ||
1457 1463 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1458 1464 != DDI_SUCCESS) ||
1459 1465 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1460 1466 != DDI_SUCCESS) ||
1461 1467 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1462 1468 != DDI_SUCCESS) ||
1463 1469 (mptsas_check_acc_handle(mpt->m_config_handle)
1464 1470 != DDI_SUCCESS)) {
1465 1471 goto fail;
1466 1472 }
1467 1473
1468 1474 /*
1469 1475 * After this point, we are not going to fail the attach.
1470 1476 */
1471 1477
1472 1478 /* Print message of HBA present */
1473 1479 ddi_report_dev(dip);
1474 1480
1475 1481 /* report idle status to pm framework */
1476 1482 if (mpt->m_options & MPTSAS_OPT_PM) {
1477 1483 (void) pm_idle_component(dip, 0);
1478 1484 }
1479 1485
1480 1486 return (DDI_SUCCESS);
1481 1487
1482 1488 fail:
1483 1489 mptsas_log(mpt, CE_WARN, "attach failed");
1484 1490 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1485 1491 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1486 1492 if (mpt) {
1487 1493 /* deallocate in reverse order */
1488 1494 if (added_watchdog) {
1489 1495 mptsas_list_del(mpt);
1490 1496 mutex_enter(&mptsas_global_mutex);
1491 1497
1492 1498 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1493 1499 timeout_id_t tid = mptsas_timeout_id;
1494 1500 mptsas_timeouts_enabled = 0;
1495 1501 mptsas_timeout_id = 0;
1496 1502 mutex_exit(&mptsas_global_mutex);
1497 1503 (void) untimeout(tid);
1498 1504 mutex_enter(&mptsas_global_mutex);
1499 1505 }
1500 1506 mutex_exit(&mptsas_global_mutex);
1501 1507 }
1502 1508
1503 1509 mptsas_cache_destroy(mpt);
1504 1510
1505 1511 if (smp_attach_setup) {
1506 1512 mptsas_smp_teardown(mpt);
1507 1513 }
1508 1514 if (hba_attach_setup) {
1509 1515 mptsas_hba_teardown(mpt);
1510 1516 }
1511 1517
1512 1518 if (mpt->m_targets)
1513 1519 refhash_destroy(mpt->m_targets);
1514 1520 if (mpt->m_smp_targets)
1515 1521 refhash_destroy(mpt->m_smp_targets);
1516 1522
1517 1523 if (mpt->m_active) {
1518 1524 mptsas_free_active_slots(mpt);
1519 1525 }
1520 1526 if (intr_added) {
1521 1527 mptsas_unregister_intrs(mpt);
1522 1528 }
1523 1529
1524 1530 if (doneq_thread_create) {
1525 1531 mutex_enter(&mpt->m_doneq_mutex);
1526 1532 doneq_thread_num = mpt->m_doneq_thread_n;
1527 1533 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1528 1534 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1529 1535 mpt->m_doneq_thread_id[j].flag &=
1530 1536 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1531 1537 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1532 1538 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1533 1539 }
1534 1540 while (mpt->m_doneq_thread_n) {
1535 1541 cv_wait(&mpt->m_doneq_thread_cv,
1536 1542 &mpt->m_doneq_mutex);
1537 1543 }
1538 1544 for (j = 0; j < doneq_thread_num; j++) {
1539 1545 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1540 1546 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1541 1547 }
1542 1548 kmem_free(mpt->m_doneq_thread_id,
1543 1549 sizeof (mptsas_doneq_thread_list_t)
1544 1550 * doneq_thread_num);
1545 1551 mutex_exit(&mpt->m_doneq_mutex);
1546 1552 cv_destroy(&mpt->m_doneq_thread_cv);
1547 1553 mutex_destroy(&mpt->m_doneq_mutex);
1548 1554 }
1549 1555 if (event_taskq_create) {
1550 1556 ddi_taskq_destroy(mpt->m_event_taskq);
1551 1557 }
1552 1558 if (dr_taskq_create) {
1553 1559 ddi_taskq_destroy(mpt->m_dr_taskq);
1554 1560 }
1555 1561 if (mutex_init_done) {
1556 1562 mutex_destroy(&mpt->m_tx_waitq_mutex);
1557 1563 mutex_destroy(&mpt->m_passthru_mutex);
1558 1564 mutex_destroy(&mpt->m_mutex);
1559 1565 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1560 1566 mutex_destroy(
1561 1567 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1562 1568 }
1563 1569 cv_destroy(&mpt->m_cv);
1564 1570 cv_destroy(&mpt->m_passthru_cv);
1565 1571 cv_destroy(&mpt->m_fw_cv);
1566 1572 cv_destroy(&mpt->m_config_cv);
1567 1573 cv_destroy(&mpt->m_fw_diag_cv);
1568 1574 }
1569 1575
1570 1576 if (map_setup) {
1571 1577 mptsas_cfg_fini(mpt);
1572 1578 }
1573 1579 if (config_setup) {
1574 1580 mptsas_config_space_fini(mpt);
1575 1581 }
1576 1582 mptsas_free_handshake_msg(mpt);
1577 1583 mptsas_hba_fini(mpt);
1578 1584
1579 1585 mptsas_fm_fini(mpt);
1580 1586 ddi_soft_state_free(mptsas_state, instance);
1581 1587 ddi_prop_remove_all(dip);
1582 1588 }
1583 1589 return (DDI_FAILURE);
1584 1590 }
1585 1591
/*
 * DDI_SUSPEND handling for the HBA node.
 *
 * Marks the instance suspended (idempotently via the m_suspended
 * counter), cancels this instance's timeout threads, and -- only once
 * every mpt instance is suspended -- cancels the global watch and
 * reset-watch timeouts.  If the device is at full power it then
 * disables interrupts, syncs Integrated RAID state with a RAID action
 * system shutdown, and drains both taskqs.
 *
 * Always returns DDI_SUCCESS; iport nodes and half-attached instances
 * have nothing to do.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* Already suspended: just bump the count and return. */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	/*
	 * The drop/retake of m_mutex around untimeout() avoids deadlock
	 * with a timeout handler that may be blocked on the mutex.
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		/* g == NULL means every instance is suspended. */
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1692 1698
1693 1699 #ifdef __sparc
1694 1700 /*ARGSUSED*/
1695 1701 static int
1696 1702 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1697 1703 {
1698 1704 mptsas_t *mpt;
1699 1705 scsi_hba_tran_t *tran;
1700 1706
1701 1707 /*
1702 1708 * If this call is for iport, just return.
1703 1709 */
1704 1710 if (scsi_hba_iport_unit_address(devi))
1705 1711 return (DDI_SUCCESS);
1706 1712
1707 1713 if ((tran = ddi_get_driver_private(devi)) == NULL)
1708 1714 return (DDI_SUCCESS);
1709 1715
1710 1716 if ((mpt = TRAN2MPT(tran)) == NULL)
1711 1717 return (DDI_SUCCESS);
1712 1718
1713 1719 /*
1714 1720 * Send RAID action system shutdown to sync IR. Disable HBA
1715 1721 * interrupts in hardware first.
1716 1722 */
1717 1723 MPTSAS_DISABLE_INTR(mpt);
1718 1724 mptsas_raid_action_system_shutdown(mpt);
1719 1725
1720 1726 return (DDI_SUCCESS);
1721 1727 }
1722 1728 #else /* __sparc */
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
mptsas_quiesce(dev_info_t *devi)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;

	/*
	 * If this call is for iport, just return.
	 */
	if (scsi_hba_iport_unit_address(devi))
		return (DDI_SUCCESS);

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	if ((mpt = TRAN2MPT(tran)) == NULL)
		return (DDI_SUCCESS);

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/* Send RAID action system shutdown to sync IR */
	mptsas_raid_action_system_shutdown(mpt);

	return (DDI_SUCCESS);
}
1758 1764 #endif /* __sparc */
1759 1765
1760 1766 /*
1761 1767 * detach(9E). Remove all device allocations and system resources;
1762 1768 * disable device interrupts.
1763 1769 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1764 1770 */
1765 1771 static int
1766 1772 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1767 1773 {
1768 1774 /* CONSTCOND */
1769 1775 ASSERT(NO_COMPETING_THREADS);
1770 1776 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1771 1777
1772 1778 switch (cmd) {
1773 1779 case DDI_DETACH:
1774 1780 return (mptsas_do_detach(devi));
1775 1781
1776 1782 case DDI_SUSPEND:
1777 1783 return (mptsas_suspend(devi));
1778 1784
1779 1785 default:
1780 1786 return (DDI_FAILURE);
1781 1787 }
1782 1788 /* NOTREACHED */
1783 1789 }
1784 1790
/*
 * Perform the real work of DDI_DETACH.
 *
 * For an iport node: free any MPxIO path-info children (failing the detach
 * if any path cannot be freed) and unregister the pHCI.  For the HBA node:
 * raise power to D0 if needed, shut down the IOC, stop all service threads
 * and timeouts, then release every resource acquired in mptsas_attach in
 * roughly reverse order.
 *
 * Returns DDI_SUCCESS when the node is fully torn down, DDI_FAILURE if
 * detach must be refused (outstanding paths, power-up failure, bad state).
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				/* A path is still busy; refuse the detach. */
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR. After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/*
	 * Shut down the doneq service threads: clear the ACTIVE flag on
	 * each, wake it, then wait for m_doneq_thread_n to drop to zero
	 * (each exiting thread decrements it and signals m_doneq_thread_cv).
	 */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt.  The mutex is dropped
	 * around untimeout() so a concurrently firing callback that
	 * needs m_mutex cannot deadlock with us.
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	mptsas_destroy_hashes(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
2000 2006
2001 2007 static void
2002 2008 mptsas_list_add(mptsas_t *mpt)
2003 2009 {
2004 2010 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2005 2011
2006 2012 if (mptsas_head == NULL) {
2007 2013 mptsas_head = mpt;
2008 2014 } else {
2009 2015 mptsas_tail->m_next = mpt;
2010 2016 }
2011 2017 mptsas_tail = mpt;
2012 2018 rw_exit(&mptsas_global_rwlock);
2013 2019 }
2014 2020
/*
 * Unlink an instance from the global mptsas softc list.  Panics if the
 * instance is not found.  On exit, `m' is the new head (when mpt was the
 * head) or mpt's predecessor (otherwise); in both cases it is the correct
 * new tail if mpt was the last element.
 */
static void
mptsas_list_del(mptsas_t *mpt)
{
	mptsas_t *m;
	/*
	 * Remove device instance from the global linked list
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	if (mptsas_head == mpt) {
		/* m becomes the new head (NULL if the list is now empty). */
		m = mptsas_head = mpt->m_next;
	} else {
		/* Scan for the predecessor and splice mpt out. */
		for (m = mptsas_head; m != NULL; m = m->m_next) {
			if (m->m_next == mpt) {
				m->m_next = mpt->m_next;
				break;
			}
		}
		if (m == NULL) {
			mptsas_log(mpt, CE_PANIC, "Not in softc list!");
		}
	}

	if (mptsas_tail == mpt) {
		mptsas_tail = m;
	}
	rw_exit(&mptsas_global_rwlock);
}
2042 2048
2043 2049 static int
2044 2050 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2045 2051 {
2046 2052 ddi_dma_attr_t task_dma_attrs;
2047 2053
2048 2054 mpt->m_hshk_dma_size = 0;
2049 2055 task_dma_attrs = mpt->m_msg_dma_attr;
2050 2056 task_dma_attrs.dma_attr_sgllen = 1;
2051 2057 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2052 2058
2053 2059 /* allocate Task Management ddi_dma resources */
2054 2060 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
2055 2061 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
2056 2062 alloc_size, NULL) == FALSE) {
2057 2063 return (DDI_FAILURE);
2058 2064 }
2059 2065 mpt->m_hshk_dma_size = alloc_size;
2060 2066
2061 2067 return (DDI_SUCCESS);
2062 2068 }
2063 2069
2064 2070 static void
2065 2071 mptsas_free_handshake_msg(mptsas_t *mpt)
2066 2072 {
2067 2073 if (mpt->m_hshk_dma_size == 0)
2068 2074 return;
2069 2075 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
2070 2076 mpt->m_hshk_dma_size = 0;
2071 2077 }
2072 2078
/*
 * Allocate and populate the SCSA transport vector for this HBA and
 * attach it with scsi_hba_attach_setup().  Returns TRUE on success;
 * on failure the tran is freed, m_tran is cleared and FALSE returned.
 */
static int
mptsas_hba_setup(mptsas_t *mpt)
{
	scsi_hba_tran_t		*hba_tran;
	int			tran_flags;

	/* Allocate a transport structure */
	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
	    SCSI_HBA_CANSLEEP);
	ASSERT(mpt->m_tran != NULL);

	hba_tran->tran_hba_private	= mpt;
	hba_tran->tran_tgt_private	= NULL;

	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;

	hba_tran->tran_start		= mptsas_scsi_start;
	hba_tran->tran_reset		= mptsas_scsi_reset;
	hba_tran->tran_abort		= mptsas_scsi_abort;
	hba_tran->tran_getcap		= mptsas_scsi_getcap;
	hba_tran->tran_setcap		= mptsas_scsi_setcap;
	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;

	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;

	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
	hba_tran->tran_get_name		= mptsas_get_name;

	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
	hba_tran->tran_bus_reset	= NULL;

	/* No event support is provided by this HBA. */
	hba_tran->tran_add_eventcall	= NULL;
	hba_tran->tran_get_eventcookie	= NULL;
	hba_tran->tran_post_event	= NULL;
	hba_tran->tran_remove_eventcall	= NULL;

	hba_tran->tran_bus_config	= mptsas_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	/*
	 * All children of the HBA are iports, so the tran must be cloned.
	 * Pass SCSI_HBA_TRAN_CLONE so that each iport inherits a copy of
	 * this tran vector.
	 */
	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);

	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
	    hba_tran, tran_flags) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
		scsi_hba_tran_free(hba_tran);
		mpt->m_tran = NULL;
		return (FALSE);
	}
	return (TRUE);
}
2134 2140
2135 2141 static void
2136 2142 mptsas_hba_teardown(mptsas_t *mpt)
2137 2143 {
2138 2144 (void) scsi_hba_detach(mpt->m_dip);
2139 2145 if (mpt->m_tran != NULL) {
2140 2146 scsi_hba_tran_free(mpt->m_tran);
2141 2147 mpt->m_tran = NULL;
2142 2148 }
2143 2149 }
2144 2150
/*
 * Register one iport per distinct narrow/wide port on the HBA, plus the
 * virtual iport "v0" used for RAID volumes.  Each iport is named by the
 * hex phymask of the phys belonging to that port.  m_mutex is dropped
 * around scsi_hba_iport_register() since it may block.
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int i, j;
	mptsas_phymask_t mask = 0x0;
	/*
	 * initial value of mask is 0
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* Skip phys with nothing attached. */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* Already covered by an earlier port's mask? */
		if ((mask & (1 << i)) != 0)
			continue;

		/* Collect every attached phy on the same port. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		(void) sprintf(phy_mask_name, "%x", phy_mask);

		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2199 2205
/*
 * Set up the SMP (SAS expander) transport for this HBA and create the
 * refhash used to track SMP targets by address.  Returns TRUE on
 * success; on failure the smp tran is freed and FALSE is returned.
 */
static int
mptsas_smp_setup(mptsas_t *mpt)
{
	mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
	ASSERT(mpt->m_smptran != NULL);
	mpt->m_smptran->smp_tran_hba_private = mpt;
	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
	if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
		smp_hba_tran_free(mpt->m_smptran);
		mpt->m_smptran = NULL;
		return (FALSE);
	}
	/*
	 * Initialize smp hash table
	 */
	mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_smp_free, sizeof (mptsas_smp_t),
	    offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
	    KM_SLEEP);
	/* 0xFFFF marks "no SMP device handle assigned yet". */
	mpt->m_smp_devhdl = 0xFFFF;

	return (TRUE);
}
2225 2231
2226 2232 static void
2227 2233 mptsas_smp_teardown(mptsas_t *mpt)
2228 2234 {
2229 2235 (void) smp_hba_detach(mpt->m_dip);
2230 2236 if (mpt->m_smptran != NULL) {
2231 2237 smp_hba_tran_free(mpt->m_smptran);
2232 2238 mpt->m_smptran = NULL;
2233 2239 }
2234 2240 mpt->m_smp_devhdl = 0;
2235 2241 }
2236 2242
2237 2243 static int
2238 2244 mptsas_cache_create(mptsas_t *mpt)
2239 2245 {
2240 2246 int instance = mpt->m_instance;
2241 2247 char buf[64];
2242 2248
2243 2249 /*
2244 2250 * create kmem cache for packets
2245 2251 */
2246 2252 (void) sprintf(buf, "mptsas%d_cache", instance);
2247 2253 mpt->m_kmem_cache = kmem_cache_create(buf,
2248 2254 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2249 2255 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2250 2256 NULL, (void *)mpt, NULL, 0);
2251 2257
2252 2258 if (mpt->m_kmem_cache == NULL) {
2253 2259 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2254 2260 return (FALSE);
2255 2261 }
2256 2262
2257 2263 /*
2258 2264 * create kmem cache for extra SGL frames if SGL cannot
2259 2265 * be accomodated into main request frame.
2260 2266 */
2261 2267 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2262 2268 mpt->m_cache_frames = kmem_cache_create(buf,
2263 2269 sizeof (mptsas_cache_frames_t), 8,
2264 2270 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2265 2271 NULL, (void *)mpt, NULL, 0);
2266 2272
2267 2273 if (mpt->m_cache_frames == NULL) {
2268 2274 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2269 2275 return (FALSE);
2270 2276 }
2271 2277
2272 2278 return (TRUE);
2273 2279 }
2274 2280
2275 2281 static void
2276 2282 mptsas_cache_destroy(mptsas_t *mpt)
2277 2283 {
2278 2284 /* deallocate in reverse order */
2279 2285 if (mpt->m_cache_frames) {
2280 2286 kmem_cache_destroy(mpt->m_cache_frames);
2281 2287 mpt->m_cache_frames = NULL;
2282 2288 }
2283 2289 if (mpt->m_kmem_cache) {
2284 2290 kmem_cache_destroy(mpt->m_kmem_cache);
2285 2291 mpt->m_kmem_cache = NULL;
2286 2292 }
2287 2293 }
2288 2294
/*
 * power(9E) entry point for the HBA node (iports are ignored).
 *
 * PM_LEVEL_D0: power the chip on, poll the Doorbell register for up to
 * 30 seconds (3000 polls x 10ms) waiting for the IOC to leave the RESET
 * state, and hard-reset it if it still is not OPERATIONAL.
 * PM_LEVEL_D3: power the chip off.  A busy device refuses to be lowered.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t	*mpt;
	int		rval = DDI_SUCCESS;
	int		polls = 0;
	uint32_t	ioc_status;

	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset.
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2360 2366
/*
 * Initialize configuration space and figure out which
 * chip and revision of the chip the mpt driver is using.
 * Idempotent: returns TRUE immediately if config space is already
 * mapped.  Returns TRUE on success, FALSE if pci_config_setup() fails.
 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		/* Write-one-to-clear the parity error status bit. */
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team. This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}
2419 2425
2420 2426 static void
2421 2427 mptsas_config_space_fini(mptsas_t *mpt)
2422 2428 {
2423 2429 if (mpt->m_config_handle != NULL) {
2424 2430 mptsas_disable_bus_master(mpt);
2425 2431 pci_config_teardown(&mpt->m_config_handle);
2426 2432 mpt->m_config_handle = NULL;
2427 2433 }
2428 2434 }
2429 2435
2430 2436 static void
2431 2437 mptsas_setup_cmd_reg(mptsas_t *mpt)
2432 2438 {
2433 2439 ushort_t cmdreg;
2434 2440
2435 2441 /*
2436 2442 * Set the command register to the needed values.
2437 2443 */
2438 2444 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2439 2445 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2440 2446 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2441 2447 cmdreg &= ~PCI_COMM_IO;
2442 2448 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2443 2449 }
2444 2450
2445 2451 static void
2446 2452 mptsas_disable_bus_master(mptsas_t *mpt)
2447 2453 {
2448 2454 ushort_t cmdreg;
2449 2455
2450 2456 /*
2451 2457 * Clear the master enable bit in the PCI command register.
2452 2458 * This prevents any bus mastering activity like DMA.
2453 2459 */
2454 2460 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2455 2461 cmdreg &= ~PCI_COMM_ME;
2456 2462 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2457 2463 }
2458 2464
2459 2465 int
2460 2466 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2461 2467 {
2462 2468 ddi_dma_attr_t attrs;
2463 2469
2464 2470 attrs = mpt->m_io_dma_attr;
2465 2471 attrs.dma_attr_sgllen = 1;
2466 2472
2467 2473 ASSERT(dma_statep != NULL);
2468 2474
2469 2475 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2470 2476 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2471 2477 &dma_statep->cookie) == FALSE) {
2472 2478 return (DDI_FAILURE);
2473 2479 }
2474 2480
2475 2481 return (DDI_SUCCESS);
2476 2482 }
2477 2483
2478 2484 void
2479 2485 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2480 2486 {
2481 2487 ASSERT(dma_statep != NULL);
2482 2488 mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2483 2489 dma_statep->size = 0;
2484 2490 }
2485 2491
/*
 * Allocate a temporary single-cookie DMA buffer of `size' bytes, run
 * `callback' with the buffer's kernel address and access handle, then
 * free the buffer.  `var' is passed through to the callback unchanged.
 * The callback's return value is returned unless an FMA check on the
 * DMA or access handle fails, in which case DDI_FAILURE is returned.
 * Caller must hold m_mutex.
 */
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t	attrs;
	ddi_dma_handle_t	dma_handle;
	caddr_t		memp;
	ddi_acc_handle_t	accessp;
	int		rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	attrs.dma_attr_granular = size;

	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	rval = (*callback) (mpt, memp, var, accessp);

	/* Check the handles for faults before trusting the result. */
	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}
2518 2524
/*
 * Allocate (or re-allocate) the system request frame pool and record its
 * DMA and kernel addresses in the softc.  Any previous pool is destroyed
 * first, so this is safe to call across an IOC reset when the frame
 * geometry may have changed.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_request_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
	    &mpt->m_acc_req_frame_hdl);

	/*
	 * The size of the request frame pool is:
	 *   Number of Request Frames * Request Frame Size
	 */
	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;

	/*
	 * set the DMA attributes.  System Request Message Frames must be
	 * aligned on a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request frame pool.
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to fill in the frame.
	 */
	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_req_frame = memp;

	/*
	 * Clear the request frame pool.
	 */
	bzero(mpt->m_req_frame, mem_size);

	return (DDI_SUCCESS);
}
2571 2577
/*
 * Allocate (or re-allocate) the system reply frame pool and record its
 * DMA and kernel addresses in the softc.  Any previous pool is destroyed
 * first.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_reply_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
	    &mpt->m_acc_reply_frame_hdl);

	/*
	 * The size of the reply frame pool is:
	 *   Number of Reply Frames * Reply Frame Size
	 */
	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;

	/*
	 * set the DMA attributes.  System Reply Message Frames must be
	 * aligned on a 4-byte boundary.  This is the default.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply frame pool
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to process the frame.
	 */
	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_reply_frame = memp;

	/*
	 * Clear the reply frame pool.
	 */
	bzero(mpt->m_reply_frame, mem_size);

	return (DDI_SUCCESS);
}
2623 2629
/*
 * Allocate (or re-allocate) the Reply Free Queue, through which the
 * driver hands free reply-frame addresses back to the IOC.  Any previous
 * queue is destroyed first.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_free_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
	    &mpt->m_acc_free_queue_hdl);

	/*
	 * The reply free queue size is:
	 *   Reply Free Queue Depth * 4
	 * The "4" is the size of one 32 bit address (low part of 64-bit
	 * address)
	 */
	mem_size = mpt->m_free_queue_depth * 4;

	/*
	 * set the DMA attributes.  The Reply Free Queue must be aligned on
	 * a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply free queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply free queue memory address.  This chip uses this
	 * address to read from the reply free queue.  The second address
	 * is the address mpt uses to manage the queue.
	 */
	mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_free_queue = memp;

	/*
	 * Clear the reply free queue memory.
	 */
	bzero(mpt->m_free_queue, mem_size);

	return (DDI_SUCCESS);
}
2678 2684
/*
 * Allocate (or re-allocate) the Reply Descriptor Post Queue, into which
 * the IOC writes completion descriptors.  Any previous queue is
 * destroyed first.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_post_queue(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
	    &mpt->m_acc_post_queue_hdl);

	/*
	 * The reply descriptor post queue size is:
	 *   Reply Descriptor Post Queue Depth * 8
	 * The "8" is the size of each descriptor (8 bytes or 64 bits).
	 */
	mem_size = mpt->m_post_queue_depth * 8;

	/*
	 * set the DMA attributes.  The Reply Descriptor Post Queue must be
	 * aligned on a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply post queue
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply descriptor post queue memory address.  This chip
	 * uses this address to write to the reply descriptor post queue.  The
	 * second address is the address mpt uses to manage the queue.
	 */
	mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
	mpt->m_post_queue = memp;

	/*
	 * Clear the reply post queue memory.
	 */
	bzero(mpt->m_post_queue, mem_size);

	return (DDI_SUCCESS);
}
2732 2738
2733 2739 static void
2734 2740 mptsas_alloc_reply_args(mptsas_t *mpt)
2735 2741 {
2736 2742 if (mpt->m_replyh_args == NULL) {
2737 2743 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2738 2744 mpt->m_max_replies, KM_SLEEP);
2739 2745 }
2740 2746 }
2741 2747
2742 2748 static int
2743 2749 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2744 2750 {
2745 2751 mptsas_cache_frames_t *frames = NULL;
2746 2752 if (cmd->cmd_extra_frames == NULL) {
2747 2753 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2748 2754 if (frames == NULL) {
2749 2755 return (DDI_FAILURE);
2750 2756 }
2751 2757 cmd->cmd_extra_frames = frames;
2752 2758 }
2753 2759 return (DDI_SUCCESS);
2754 2760 }
2755 2761
2756 2762 static void
2757 2763 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2758 2764 {
2759 2765 if (cmd->cmd_extra_frames) {
2760 2766 kmem_cache_free(mpt->m_cache_frames,
2761 2767 (void *)cmd->cmd_extra_frames);
2762 2768 cmd->cmd_extra_frames = NULL;
2763 2769 }
2764 2770 }
2765 2771
/*
 * Tear down register access for this instance: release the register
 * mapping held in m_datap.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2772 2778
2773 2779 static void
2774 2780 mptsas_hba_fini(mptsas_t *mpt)
2775 2781 {
2776 2782 NDBG0(("mptsas_hba_fini"));
2777 2783
2778 2784 /*
2779 2785 * Free up any allocated memory
2780 2786 */
2781 2787 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2782 2788 &mpt->m_acc_req_frame_hdl);
2783 2789
2784 2790 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2785 2791 &mpt->m_acc_reply_frame_hdl);
2786 2792
2787 2793 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2788 2794 &mpt->m_acc_free_queue_hdl);
2789 2795
2790 2796 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2791 2797 &mpt->m_acc_post_queue_hdl);
2792 2798
2793 2799 if (mpt->m_replyh_args != NULL) {
2794 2800 kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
2795 2801 * mpt->m_max_replies);
2796 2802 }
2797 2803 }
2798 2804
2799 2805 static int
2800 2806 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
2801 2807 {
2802 2808 int lun = 0;
2803 2809 char *sas_wwn = NULL;
2804 2810 int phynum = -1;
2805 2811 int reallen = 0;
2806 2812
2807 2813 /* Get the target num */
2808 2814 lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2809 2815 LUN_PROP, 0);
2810 2816
2811 2817 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
2812 2818 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
2813 2819 /*
2814 2820 * Stick in the address of form "pPHY,LUN"
2815 2821 */
2816 2822 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
2817 2823 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
2818 2824 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
2819 2825 == DDI_PROP_SUCCESS) {
2820 2826 /*
2821 2827 * Stick in the address of the form "wWWN,LUN"
2822 2828 */
2823 2829 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
2824 2830 ddi_prop_free(sas_wwn);
2825 2831 } else {
2826 2832 return (DDI_FAILURE);
2827 2833 }
2828 2834
2829 2835 ASSERT(reallen < len);
2830 2836 if (reallen >= len) {
2831 2837 mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
2832 2838 "length too small, it needs to be %d bytes", reallen + 1);
2833 2839 }
2834 2840 return (DDI_SUCCESS);
2835 2841 }
2836 2842
/*
 * tran_tgt_init(9E) - target device instance initialization
 *
 * Locates the mptsas_target_t this child binds to (by {WWN, phymask}),
 * caches it together with the LUN in tran_tgt_private, and for
 * SATA/ATAPI devices overrides the SCSA "inquiry-*" properties with
 * vid/pid/firmware strings parsed from ATA IDENTIFY data (VPD 0x89).
 *
 * Returns DDI_SUCCESS on success; DDI_FAILURE if the node is
 * non-persistent, a required property is missing, or the target no
 * longer exists; DDI_NOT_WELL_FORMED if scsi_vhci lost the pathinfo.
 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * Use this function to allocate target-private data structures,
	 * if needed by this HBA. Add revised flow-control and queue
	 * properties for child here, if desired and if you can tell they
	 * support tagged queueing by now.
	 */
	mptsas_t *mpt;
	int lun = sd->sd_address.a_lun;
	mdi_pathinfo_t *pip = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	mptsas_target_t *ptgt = NULL;
	char *psas_wwn = NULL;
	mptsas_phymask_t phymask = 0;
	uint64_t sas_wwn = 0;
	mptsas_target_addr_t addr;
	mpt = SDEV2MPT(sd);

	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	/*
	 * Non-persistent (.conf-created) nodes are merged into the real
	 * devinfo node and rejected.
	 */
	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		/*
		 * mpxio client: LUN and WWN come from the pathinfo
		 * node's properties.
		 */
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs. Somehow scsi_vhci has
			 * lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				/* unparseable WWN string; fall back to 0 */
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/*
		 * Plain child: read the same properties off the devinfo
		 * node instead.
		 */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}

	ASSERT((sas_wwn != 0) || (phymask != 0));
	addr.mta_wwn = sas_wwn;
	addr.mta_phymask = phymask;
	/*
	 * Look the target up in the {WWN, phymask} hash under m_mutex.
	 */
	mutex_enter(&mpt->m_mutex);
	ptgt = refhash_lookup(mpt->m_targets, &addr);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	if (hba_tran->tran_tgt_private == NULL) {
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	/* mpxio clients get no inquiry-property overrides; done here */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;
		int i;

		/* dropped before issuing the blocking inquiry below */
		mutex_exit(&mpt->m_mutex);
		/*
		 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			/*
			 * Failure to fetch the page is non-fatal: the
			 * target still attaches with generic inquiry
			 * properties.
			 */
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			return (DDI_SUCCESS);
		}
		sid = (void *)(&inq89[60]);

		/* ATA IDENTIFY strings are stored as byte-swapped words */
		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		/*
		 * split model into into vid/pid
		 */
		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
			if ((*pid == ' ') || (*pid == '\t'))
				break;
		if (i < SATA_ID_MODEL_LEN) {
			vid = model;
			/*
			 * terminate vid, establish pid
			 */
			*pid++ = 0;
		} else {
			/*
			 * vid will stay "ATA ", the rule is same
			 * as sata framework implementation.
			 */
			vid = NULL;
			/*
			 * model is all pid
			 */
			pid = model;
		}

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
3030 3036 /*
3031 3037 * tran_tgt_free(9E) - target device instance deallocation
3032 3038 */
3033 3039 static void
3034 3040 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3035 3041 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3036 3042 {
3037 3043 #ifndef __lock_lint
3038 3044 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
3039 3045 #endif
3040 3046
3041 3047 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
3042 3048
3043 3049 if (tgt_private != NULL) {
3044 3050 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
3045 3051 hba_tran->tran_tgt_private = NULL;
3046 3052 }
3047 3053 }
3048 3054
3049 3055 /*
3050 3056 * scsi_pkt handling
3051 3057 *
3052 3058 * Visible to the external world via the transport structure.
3053 3059 */
3054 3060
/*
 * Notes:
 *	- transport the command to the addressed SCSI target/lun device
 *	- normal operation is to schedule the command to be transported,
 *	  and return TRAN_ACCEPT if this is successful.
 *	- if NO_INTR, tran_start must poll device for command completion
 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by other thread, don't spin to wait
	 * for it. Instead, queue the cmd and next time when the instance lock
	 * is not held, accept all the queued cmd. An extra tx_waitq is
	 * introduced to protect the queue.
	 *
	 * The polled cmd will not be queued and accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq.  An IO requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and while holding the
	 * tx_wait mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex. This scheme ensures that
	 * the last cmd in a burst be processed.
	 *
	 * we enable this feature only when the helper threads are enabled,
	 * at which we think the loads are heavy.
	 *
	 * per instance mutex m_tx_waitq_mutex is introduced to protect the
	 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
	 */

	if (mpt->m_doneq_thread_n) {
		/* try the instance lock; never spin for it here */
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/* polled commands must not be deferred to tx_waitq */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * to stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/*
				 * Another thread is already draining; just
				 * append and let the drainer pick this up.
				 */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* drain the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3204 3210
/*
 * Accept all the queued cmds(if any) before accept the current one.
 *
 * Called with m_mutex held.  Returns the TRAN_* disposition for the
 * current command after the tx_waitq has been drained.
 */
static int
mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(mutex_owned(&mpt->m_mutex));
	/*
	 * The call to mptsas_accept_tx_waitq() must always be performed
	 * because that is where mpt->m_tx_draining is cleared.
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	mptsas_accept_tx_waitq(mpt);
	mutex_exit(&mpt->m_tx_waitq_mutex);
	/*
	 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
	 * in this case, m_mutex is acquired.
	 */
	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
			/*
			 * The command should be allowed to retry by returning
			 * TRAN_BUSY to stall the I/O's which come from
			 * scsi_vhci since the device/path is in unstable state
			 * now.
			 */
			return (TRAN_BUSY);
		} else {
			/*
			 * The device is offline, just fail the command by
			 * return TRAN_FATAL_ERROR.
			 */
			return (TRAN_FATAL_ERROR);
		}
	}
	rval = mptsas_accept_pkt(mpt, cmd);

	return (rval);
}
3247 3253
/*
 * Accept a single command for transport: validate its target state,
 * then either start it immediately (if a slot and throttle headroom
 * exist) or append it to the wait queue.  Polled (FLAG_NOINTR)
 * commands are completed synchronously via mptsas_poll().
 *
 * Called with m_mutex held.  Returns TRAN_ACCEPT, TRAN_BUSY, or
 * TRAN_FATAL_ERROR.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* commands arriving via the tx_waitq were not prepared yet */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached.  Check if being reset and if DevHandle is being
	 * re-initialized.  If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/*
			 * tx_waitq commands have already been "accepted";
			 * complete them through the done queue instead of
			 * returning an error the caller can no longer see.
			 */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If device handle has already been invalidated, just
	 * fail the command. In theory, command from scsi_vhci
	 * client is impossible send down command with invalid
	 * devhdl since devhdl is set after path offline, target
	 * driver is not suppose to select a offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG3(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* no free slot; queue for later */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3351 3357
/*
 * Claim a free request slot for a command and record it as active.
 *
 * Returns TRUE and fills in cmd->cmd_slot on success; FALSE when every
 * normal slot is occupied (the caller then queues the command).
 * Called with m_mutex held.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t *slots = mpt->m_active;
	uint_t slot, start_rotor;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));

	/*
	 * Find the next available slot, beginning at m_rotor.  If no slot is
	 * available, we'll return FALSE to indicate that.  This mechanism
	 * considers only the normal slots, not the reserved slot 0 nor the
	 * task management slot m_n_normal + 1.  The rotor is left to point to
	 * the normal slot after the one we select, unless we select the last
	 * normal slot in which case it returns to slot 1.
	 */
	start_rotor = slots->m_rotor;
	do {
		slot = slots->m_rotor++;
		if (slots->m_rotor > slots->m_n_normal)
			slots->m_rotor = 1;

		/* wrapped all the way around: every slot was examined */
		if (slots->m_rotor == start_rotor)
			break;
	} while (slots->m_slot[slot] != NULL);

	/* re-check: the loop may have exited via the wrap-around break */
	if (slots->m_slot[slot] != NULL)
		return (FALSE);

	ASSERT(slot != 0 && slot <= slots->m_n_normal);

	cmd->cmd_slot = slot;
	slots->m_slot[slot] = cmd;
	mpt->m_ncmds++;

	/*
	 * only increment per target ncmds if this is not a
	 * command that has no target associated with it (i.e. a
	 * event acknowledgment)
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		/*
		 * Expiration time is set in mptsas_start_cmd
		 */
		ptgt->m_t_ncmds++;
		cmd->cmd_active_expiration = 0;
	} else {
		/*
		 * Initialize expiration time for passthrough commands,
		 */
		cmd->cmd_active_expiration = gethrtime() +
		    (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
	}
	return (TRUE);
}
3413 3419
3414 3420 /*
3415 3421 * prepare the pkt:
3416 3422 * the pkt may have been resubmitted or just reused so
3417 3423 * initialize some fields and do some checks.
3418 3424 */
3419 3425 static int
3420 3426 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3421 3427 {
3422 3428 struct scsi_pkt *pkt = CMD2PKT(cmd);
3423 3429
3424 3430 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3425 3431
3426 3432 /*
3427 3433 * Reinitialize some fields that need it; the packet may
3428 3434 * have been resubmitted
3429 3435 */
3430 3436 pkt->pkt_reason = CMD_CMPLT;
3431 3437 pkt->pkt_state = 0;
3432 3438 pkt->pkt_statistics = 0;
3433 3439 pkt->pkt_resid = 0;
3434 3440 cmd->cmd_age = 0;
3435 3441 cmd->cmd_pkt_flags = pkt->pkt_flags;
3436 3442
3437 3443 /*
3438 3444 * zero status byte.
3439 3445 */
3440 3446 *(pkt->pkt_scbp) = 0;
3441 3447
3442 3448 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3443 3449 pkt->pkt_resid = cmd->cmd_dmacount;
3444 3450
3445 3451 /*
3446 3452 * consistent packets need to be sync'ed first
3447 3453 * (only for data going out)
3448 3454 */
3449 3455 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3450 3456 (cmd->cmd_flags & CFLAG_DMASEND)) {
3451 3457 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3452 3458 DDI_DMA_SYNC_FORDEV);
3453 3459 }
3454 3460 }
3455 3461
3456 3462 cmd->cmd_flags =
3457 3463 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3458 3464 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3459 3465
3460 3466 return (TRAN_ACCEPT);
3461 3467 }
3462 3468
3463 3469 /*
3464 3470 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3465 3471 *
3466 3472 * One of three possibilities:
3467 3473 * - allocate scsi_pkt
3468 3474 * - allocate scsi_pkt and DMA resources
3469 3475 * - allocate DMA resources to an already-allocated pkt
3470 3476 */
3471 3477 static struct scsi_pkt *
3472 3478 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3473 3479 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3474 3480 int (*callback)(), caddr_t arg)
3475 3481 {
3476 3482 mptsas_cmd_t *cmd, *new_cmd;
3477 3483 mptsas_t *mpt = ADDR2MPT(ap);
3478 3484 int failure = 1;
3479 3485 uint_t oldcookiec;
3480 3486 mptsas_target_t *ptgt = NULL;
3481 3487 int rval;
3482 3488 mptsas_tgt_private_t *tgt_private;
3483 3489 int kf;
3484 3490
3485 3491 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3486 3492
3487 3493 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3488 3494 tran_tgt_private;
3489 3495 ASSERT(tgt_private != NULL);
3490 3496 if (tgt_private == NULL) {
3491 3497 return (NULL);
3492 3498 }
3493 3499 ptgt = tgt_private->t_private;
3494 3500 ASSERT(ptgt != NULL);
3495 3501 if (ptgt == NULL)
3496 3502 return (NULL);
3497 3503 ap->a_target = ptgt->m_devhdl;
3498 3504 ap->a_lun = tgt_private->t_lun;
3499 3505
3500 3506 ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3501 3507 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3502 3508 statuslen *= 100; tgtlen *= 4;
3503 3509 #endif
3504 3510 NDBG3(("mptsas_scsi_init_pkt:\n"
3505 3511 "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3506 3512 ap->a_target, (void *)pkt, (void *)bp,
3507 3513 cmdlen, statuslen, tgtlen, flags));
3508 3514
3509 3515 /*
3510 3516 * Allocate the new packet.
3511 3517 */
3512 3518 if (pkt == NULL) {
3513 3519 ddi_dma_handle_t save_dma_handle;
3514 3520 ddi_dma_handle_t save_arq_dma_handle;
3515 3521 struct buf *save_arq_bp;
3516 3522 ddi_dma_cookie_t save_arqcookie;
3517 3523
3518 3524 cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3519 3525
3520 3526 if (cmd) {
3521 3527 save_dma_handle = cmd->cmd_dmahandle;
3522 3528 save_arq_dma_handle = cmd->cmd_arqhandle;
3523 3529 save_arq_bp = cmd->cmd_arq_buf;
3524 3530 save_arqcookie = cmd->cmd_arqcookie;
3525 3531 bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3526 3532 cmd->cmd_dmahandle = save_dma_handle;
3527 3533 cmd->cmd_arqhandle = save_arq_dma_handle;
3528 3534 cmd->cmd_arq_buf = save_arq_bp;
3529 3535 cmd->cmd_arqcookie = save_arqcookie;
3530 3536
3531 3537 pkt = (void *)((uchar_t *)cmd +
3532 3538 sizeof (struct mptsas_cmd));
3533 3539 pkt->pkt_ha_private = (opaque_t)cmd;
3534 3540 pkt->pkt_address = *ap;
3535 3541 pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3536 3542 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3537 3543 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3538 3544 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3539 3545 cmd->cmd_cdblen = (uchar_t)cmdlen;
3540 3546 cmd->cmd_scblen = statuslen;
3541 3547 cmd->cmd_rqslen = SENSE_LENGTH;
3542 3548 cmd->cmd_tgt_addr = ptgt;
3543 3549 failure = 0;
3544 3550 }
3545 3551
3546 3552 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3547 3553 (tgtlen > PKT_PRIV_LEN) ||
3548 3554 (statuslen > EXTCMDS_STATUS_SIZE)) {
3549 3555 if (failure == 0) {
3550 3556 /*
3551 3557 * if extern alloc fails, all will be
3552 3558 * deallocated, including cmd
3553 3559 */
3554 3560 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3555 3561 cmdlen, tgtlen, statuslen, kf);
3556 3562 }
3557 3563 if (failure) {
3558 3564 /*
3559 3565 * if extern allocation fails, it will
3560 3566 * deallocate the new pkt as well
3561 3567 */
3562 3568 return (NULL);
3563 3569 }
3564 3570 }
3565 3571 new_cmd = cmd;
3566 3572
3567 3573 } else {
3568 3574 cmd = PKT2CMD(pkt);
3569 3575 new_cmd = NULL;
3570 3576 }
3571 3577
3572 3578
3573 3579 /* grab cmd->cmd_cookiec here as oldcookiec */
3574 3580
3575 3581 oldcookiec = cmd->cmd_cookiec;
3576 3582
3577 3583 /*
3578 3584 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3579 3585 * greater than 0 and we'll need to grab the next dma window
3580 3586 */
3581 3587 /*
3582 3588 * SLM-not doing extra command frame right now; may add later
3583 3589 */
3584 3590
3585 3591 if (cmd->cmd_nwin > 0) {
3586 3592
3587 3593 /*
3588 3594 * Make sure we havn't gone past the the total number
3589 3595 * of windows
3590 3596 */
3591 3597 if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3592 3598 return (NULL);
3593 3599 }
3594 3600 if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3595 3601 &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3596 3602 &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3597 3603 return (NULL);
3598 3604 }
3599 3605 goto get_dma_cookies;
3600 3606 }
3601 3607
3602 3608
3603 3609 if (flags & PKT_XARQ) {
3604 3610 cmd->cmd_flags |= CFLAG_XARQ;
3605 3611 }
3606 3612
3607 3613 /*
3608 3614 * DMA resource allocation. This version assumes your
3609 3615 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3610 3616 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3611 3617 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3612 3618 */
3613 3619 if (bp && (bp->b_bcount != 0) &&
3614 3620 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3615 3621
3616 3622 int cnt, dma_flags;
3617 3623 mptti_t *dmap; /* ptr to the S/G list */
3618 3624
3619 3625 /*
3620 3626 * Set up DMA memory and position to the next DMA segment.
3621 3627 */
3622 3628 ASSERT(cmd->cmd_dmahandle != NULL);
3623 3629
3624 3630 if (bp->b_flags & B_READ) {
3625 3631 dma_flags = DDI_DMA_READ;
3626 3632 cmd->cmd_flags &= ~CFLAG_DMASEND;
3627 3633 } else {
3628 3634 dma_flags = DDI_DMA_WRITE;
3629 3635 cmd->cmd_flags |= CFLAG_DMASEND;
3630 3636 }
3631 3637 if (flags & PKT_CONSISTENT) {
3632 3638 cmd->cmd_flags |= CFLAG_CMDIOPB;
3633 3639 dma_flags |= DDI_DMA_CONSISTENT;
3634 3640 }
3635 3641
3636 3642 if (flags & PKT_DMA_PARTIAL) {
3637 3643 dma_flags |= DDI_DMA_PARTIAL;
3638 3644 }
3639 3645
3640 3646 /*
3641 3647 * workaround for byte hole issue on psycho and
3642 3648 * schizo pre 2.1
3643 3649 */
3644 3650 if ((bp->b_flags & B_READ) && ((bp->b_flags &
3645 3651 (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3646 3652 ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3647 3653 dma_flags |= DDI_DMA_CONSISTENT;
3648 3654 }
3649 3655
3650 3656 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3651 3657 dma_flags, callback, arg,
3652 3658 &cmd->cmd_cookie, &cmd->cmd_cookiec);
3653 3659 if (rval == DDI_DMA_PARTIAL_MAP) {
3654 3660 (void) ddi_dma_numwin(cmd->cmd_dmahandle,
3655 3661 &cmd->cmd_nwin);
3656 3662 cmd->cmd_winindex = 0;
3657 3663 (void) ddi_dma_getwin(cmd->cmd_dmahandle,
3658 3664 cmd->cmd_winindex, &cmd->cmd_dma_offset,
3659 3665 &cmd->cmd_dma_len, &cmd->cmd_cookie,
3660 3666 &cmd->cmd_cookiec);
3661 3667 } else if (rval && (rval != DDI_DMA_MAPPED)) {
3662 3668 switch (rval) {
3663 3669 case DDI_DMA_NORESOURCES:
3664 3670 bioerror(bp, 0);
3665 3671 break;
3666 3672 case DDI_DMA_BADATTR:
3667 3673 case DDI_DMA_NOMAPPING:
3668 3674 bioerror(bp, EFAULT);
3669 3675 break;
3670 3676 case DDI_DMA_TOOBIG:
3671 3677 default:
3672 3678 bioerror(bp, EINVAL);
3673 3679 break;
3674 3680 }
3675 3681 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3676 3682 if (new_cmd) {
3677 3683 mptsas_scsi_destroy_pkt(ap, pkt);
3678 3684 }
3679 3685 return ((struct scsi_pkt *)NULL);
3680 3686 }
3681 3687
3682 3688 get_dma_cookies:
3683 3689 cmd->cmd_flags |= CFLAG_DMAVALID;
3684 3690 ASSERT(cmd->cmd_cookiec > 0);
3685 3691
3686 3692 if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3687 3693 mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3688 3694 cmd->cmd_cookiec);
3689 3695 bioerror(bp, EINVAL);
3690 3696 if (new_cmd) {
3691 3697 mptsas_scsi_destroy_pkt(ap, pkt);
3692 3698 }
3693 3699 return ((struct scsi_pkt *)NULL);
3694 3700 }
3695 3701
3696 3702 /*
3697 3703 * Allocate extra SGL buffer if needed.
3698 3704 */
3699 3705 if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3700 3706 (cmd->cmd_extra_frames == NULL)) {
3701 3707 if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3702 3708 DDI_FAILURE) {
3703 3709 mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3704 3710 "failed");
3705 3711 bioerror(bp, ENOMEM);
3706 3712 if (new_cmd) {
3707 3713 mptsas_scsi_destroy_pkt(ap, pkt);
3708 3714 }
3709 3715 return ((struct scsi_pkt *)NULL);
3710 3716 }
3711 3717 }
3712 3718
3713 3719 /*
3714 3720 * Always use scatter-gather transfer
3715 3721 * Use the loop below to store physical addresses of
3716 3722 * DMA segments, from the DMA cookies, into your HBA's
3717 3723 * scatter-gather list.
3718 3724 * We need to ensure we have enough kmem alloc'd
3719 3725 * for the sg entries since we are no longer using an
3720 3726 * array inside mptsas_cmd_t.
3721 3727 *
3722 3728 * We check cmd->cmd_cookiec against oldcookiec so
3723 3729 * the scatter-gather list is correctly allocated
3724 3730 */
3725 3731
3726 3732 if (oldcookiec != cmd->cmd_cookiec) {
3727 3733 if (cmd->cmd_sg != (mptti_t *)NULL) {
3728 3734 kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3729 3735 oldcookiec);
3730 3736 cmd->cmd_sg = NULL;
3731 3737 }
3732 3738 }
3733 3739
3734 3740 if (cmd->cmd_sg == (mptti_t *)NULL) {
3735 3741 cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3736 3742 cmd->cmd_cookiec), kf);
3737 3743
3738 3744 if (cmd->cmd_sg == (mptti_t *)NULL) {
3739 3745 mptsas_log(mpt, CE_WARN,
3740 3746 "unable to kmem_alloc enough memory "
3741 3747 "for scatter/gather list");
3742 3748 /*
3743 3749 * if we have an ENOMEM condition we need to behave
3744 3750 * the same way as the rest of this routine
3745 3751 */
3746 3752
3747 3753 bioerror(bp, ENOMEM);
3748 3754 if (new_cmd) {
3749 3755 mptsas_scsi_destroy_pkt(ap, pkt);
3750 3756 }
3751 3757 return ((struct scsi_pkt *)NULL);
3752 3758 }
3753 3759 }
3754 3760
3755 3761 dmap = cmd->cmd_sg;
3756 3762
3757 3763 ASSERT(cmd->cmd_cookie.dmac_size != 0);
3758 3764
3759 3765 /*
3760 3766 * store the first segment into the S/G list
3761 3767 */
3762 3768 dmap->count = cmd->cmd_cookie.dmac_size;
3763 3769 dmap->addr.address64.Low = (uint32_t)
3764 3770 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3765 3771 dmap->addr.address64.High = (uint32_t)
3766 3772 (cmd->cmd_cookie.dmac_laddress >> 32);
3767 3773
3768 3774 /*
3769 3775 * dmacount counts the size of the dma for this window
3770 3776 * (if partial dma is being used). totaldmacount
3771 3777 * keeps track of the total amount of dma we have
3772 3778 * transferred for all the windows (needed to calculate
3773 3779 * the resid value below).
3774 3780 */
3775 3781 cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
3776 3782 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3777 3783
3778 3784 /*
3779 3785 * We already stored the first DMA scatter gather segment,
3780 3786 * start at 1 if we need to store more.
3781 3787 */
3782 3788 for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
3783 3789 /*
3784 3790 * Get next DMA cookie
3785 3791 */
3786 3792 ddi_dma_nextcookie(cmd->cmd_dmahandle,
3787 3793 &cmd->cmd_cookie);
3788 3794 dmap++;
3789 3795
3790 3796 cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
3791 3797 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3792 3798
3793 3799 /*
3794 3800 * store the segment parms into the S/G list
3795 3801 */
3796 3802 dmap->count = cmd->cmd_cookie.dmac_size;
3797 3803 dmap->addr.address64.Low = (uint32_t)
3798 3804 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3799 3805 dmap->addr.address64.High = (uint32_t)
3800 3806 (cmd->cmd_cookie.dmac_laddress >> 32);
3801 3807 }
3802 3808
3803 3809 /*
3804 3810 * If this was partially allocated we set the resid
3805 3811 * the amount of data NOT transferred in this window
3806 3812 * If there is only one window, the resid will be 0
3807 3813 */
3808 3814 pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
3809 3815 NDBG3(("mptsas_scsi_init_pkt: cmd_dmacount=%d.",
3810 3816 cmd->cmd_dmacount));
3811 3817 }
3812 3818 return (pkt);
3813 3819 }
3814 3820
3815 3821 /*
3816 3822 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3817 3823 *
3818 3824 * Notes:
3819 3825 * - also frees DMA resources if allocated
3820 3826 * - implicit DMA synchonization
3821 3827 */
3822 3828 static void
3823 3829 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3824 3830 {
3825 3831 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3826 3832 mptsas_t *mpt = ADDR2MPT(ap);
3827 3833
3828 3834 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3829 3835 ap->a_target, (void *)pkt));
3830 3836
3831 3837 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3832 3838 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3833 3839 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3834 3840 }
3835 3841
3836 3842 if (cmd->cmd_sg) {
3837 3843 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3838 3844 cmd->cmd_sg = NULL;
3839 3845 }
3840 3846
3841 3847 mptsas_free_extra_sgl_frame(mpt, cmd);
3842 3848
3843 3849 if ((cmd->cmd_flags &
3844 3850 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3845 3851 CFLAG_SCBEXTERN)) == 0) {
3846 3852 cmd->cmd_flags = CFLAG_FREE;
3847 3853 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3848 3854 } else {
3849 3855 mptsas_pkt_destroy_extern(mpt, cmd);
3850 3856 }
3851 3857 }
3852 3858
/*
 * kmem cache constructor and destructor:
 * When constructing, we allocate the command's data DMA handle, its
 * ARQ (sense) buffer, and the DMA handle used to bind that buffer.
 * When destructing, we release those same resources.
 */
3858 3864 static int
3859 3865 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3860 3866 {
3861 3867 mptsas_cmd_t *cmd = buf;
3862 3868 mptsas_t *mpt = cdrarg;
3863 3869 struct scsi_address ap;
3864 3870 uint_t cookiec;
3865 3871 ddi_dma_attr_t arq_dma_attr;
3866 3872 int (*callback)(caddr_t);
3867 3873
3868 3874 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3869 3875
3870 3876 NDBG4(("mptsas_kmem_cache_constructor"));
3871 3877
3872 3878 ap.a_hba_tran = mpt->m_tran;
3873 3879 ap.a_target = 0;
3874 3880 ap.a_lun = 0;
3875 3881
3876 3882 /*
3877 3883 * allocate a dma handle
3878 3884 */
3879 3885 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
3880 3886 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
3881 3887 cmd->cmd_dmahandle = NULL;
3882 3888 return (-1);
3883 3889 }
3884 3890
3885 3891 cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
3886 3892 SENSE_LENGTH, B_READ, callback, NULL);
3887 3893 if (cmd->cmd_arq_buf == NULL) {
3888 3894 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3889 3895 cmd->cmd_dmahandle = NULL;
3890 3896 return (-1);
3891 3897 }
3892 3898
3893 3899 /*
3894 3900 * allocate a arq handle
3895 3901 */
3896 3902 arq_dma_attr = mpt->m_msg_dma_attr;
3897 3903 arq_dma_attr.dma_attr_sgllen = 1;
3898 3904 if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3899 3905 NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3900 3906 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3901 3907 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3902 3908 cmd->cmd_dmahandle = NULL;
3903 3909 cmd->cmd_arqhandle = NULL;
3904 3910 return (-1);
3905 3911 }
3906 3912
3907 3913 if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
3908 3914 cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3909 3915 callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3910 3916 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3911 3917 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3912 3918 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3913 3919 cmd->cmd_dmahandle = NULL;
3914 3920 cmd->cmd_arqhandle = NULL;
3915 3921 cmd->cmd_arq_buf = NULL;
3916 3922 return (-1);
3917 3923 }
3918 3924
3919 3925 return (0);
3920 3926 }
3921 3927
3922 3928 static void
3923 3929 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3924 3930 {
3925 3931 #ifndef __lock_lint
3926 3932 _NOTE(ARGUNUSED(cdrarg))
3927 3933 #endif
3928 3934 mptsas_cmd_t *cmd = buf;
3929 3935
3930 3936 NDBG4(("mptsas_kmem_cache_destructor"));
3931 3937
3932 3938 if (cmd->cmd_arqhandle) {
3933 3939 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3934 3940 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3935 3941 cmd->cmd_arqhandle = NULL;
3936 3942 }
3937 3943 if (cmd->cmd_arq_buf) {
3938 3944 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3939 3945 cmd->cmd_arq_buf = NULL;
3940 3946 }
3941 3947 if (cmd->cmd_dmahandle) {
3942 3948 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3943 3949 cmd->cmd_dmahandle = NULL;
3944 3950 }
3945 3951 }
3946 3952
/*
 * kmem cache constructor for the extra-SGL frame cache.
 *
 * Allocates and binds one contiguous, consistent DMA area sized to hold
 * (m_max_request_frames - 1) request frames; these serve as the chain
 * frames used when a command's scatter/gather list overflows the main
 * message frame (see mptsas_sge_chain()).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; on failure everything acquired so
 * far is released and m_dma_hdl is left NULL so the destructor is safe.
 */
static int
mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
{
	mptsas_cache_frames_t	*p = buf;
	mptsas_t		*mpt = cdrarg;
	ddi_dma_attr_t		frame_dma_attr;
	size_t			mem_size, alloc_len;
	ddi_dma_cookie_t	cookie;
	uint_t			ncookie;
	int			(*callback)(caddr_t) = (kmflags == KM_SLEEP)
	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;

	/*
	 * Chain frames must be 16-byte aligned and must fit a single
	 * cookie (one physically contiguous segment).
	 */
	frame_dma_attr = mpt->m_msg_dma_attr;
	frame_dma_attr.dma_attr_align = 0x10;
	frame_dma_attr.dma_attr_sgllen = 1;

	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
	    &p->m_dma_hdl) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	/* One frame is the main frame, hence max - 1 extra frames. */
	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;

	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
		    " extra SGL.");
		return (DDI_FAILURE);
	}

	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
		/* mem_free must precede freeing the handle it came from. */
		(void) ddi_dma_mem_free(&p->m_acc_hdl);
		ddi_dma_free_handle(&p->m_dma_hdl);
		p->m_dma_hdl = NULL;
		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
		    " extra SGL");
		return (DDI_FAILURE);
	}

	/*
	 * Store the SGL memory address. This chip uses this
	 * address to dma to and from the driver. The second
	 * address is the address mpt uses to fill in the SGL.
	 */
	p->m_phys_addr = cookie.dmac_address;

	return (DDI_SUCCESS);
}
4002 4008
4003 4009 static void
4004 4010 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
4005 4011 {
4006 4012 #ifndef __lock_lint
4007 4013 _NOTE(ARGUNUSED(cdrarg))
4008 4014 #endif
4009 4015 mptsas_cache_frames_t *p = buf;
4010 4016 if (p->m_dma_hdl != NULL) {
4011 4017 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
4012 4018 (void) ddi_dma_mem_free(&p->m_acc_hdl);
4013 4019 ddi_dma_free_handle(&p->m_dma_hdl);
4014 4020 p->m_phys_addr = NULL;
4015 4021 p->m_frames_addr = NULL;
4016 4022 p->m_dma_hdl = NULL;
4017 4023 p->m_acc_hdl = NULL;
4018 4024 }
4019 4025
4020 4026 }
4021 4027
/*
 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
 * for non-standard length cdb, pkt_private, status areas
 * if allocation fails, then deallocate all external space and the pkt
 *
 * Returns 0 on success, 1 on failure.  On failure the cmd itself is
 * freed via mptsas_pkt_destroy_extern(), which uses the CFLAG_*EXTERN
 * flags set below to know which areas were actually allocated - so each
 * flag is only set after its allocation succeeds.
 */
/* ARGSUSED */
static int
mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
    int cmdlen, int tgtlen, int statuslen, int kf)
{
	caddr_t			cdbp, scbp, tgt;
	int			(*callback)(caddr_t) = (kf == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
	struct scsi_address	ap;
	size_t			senselength;
	ddi_dma_attr_t		ext_arq_dma_attr;
	uint_t			cookiec;

	NDBG3(("mptsas_pkt_alloc_extern: "
	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));

	tgt = cdbp = scbp = NULL;
	cmd->cmd_scblen = statuslen;
	cmd->cmd_privlen = (uchar_t)tgtlen;

	/* CDB longer than the inline cmd_cdb array. */
	if (cmdlen > sizeof (cmd->cmd_cdb)) {
		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
		cmd->cmd_flags |= CFLAG_CDBEXTERN;
	}
	/* Target-driver private area longer than the inline space. */
	if (tgtlen > PKT_PRIV_LEN) {
		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
		cmd->cmd_pkt->pkt_private = tgt;
	}
	/* Status area longer than the inline space. */
	if (statuslen > EXTCMDS_STATUS_SIZE) {
		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
			goto fail;
		}
		cmd->cmd_flags |= CFLAG_SCBEXTERN;
		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;

		/* allocate sense data buf for DMA */

		/*
		 * Sense bytes are what remains of the status area
		 * after the fixed scsi_arq_status header.
		 */
		senselength = statuslen - MPTSAS_GET_ITEM_OFF(
		    struct scsi_arq_status, sts_sensedata);
		cmd->cmd_rqslen = (uchar_t)senselength;

		/* Fake address used only for scsi_alloc_consistent_buf(). */
		ap.a_hba_tran = mpt->m_tran;
		ap.a_target = 0;
		ap.a_lun = 0;

		cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, senselength, B_READ,
		    callback, NULL);

		if (cmd->cmd_ext_arq_buf == NULL) {
			goto fail;
		}
		/*
		 * allocate a extern arq handle and bind the buf
		 */
		ext_arq_dma_attr = mpt->m_msg_dma_attr;
		ext_arq_dma_attr.dma_attr_sgllen = 1;
		if ((ddi_dma_alloc_handle(mpt->m_dip,
		    &ext_arq_dma_attr, callback,
		    NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
		    cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
		    callback, NULL, &cmd->cmd_ext_arqcookie,
		    &cookiec)
		    != DDI_SUCCESS) {
			goto fail;
		}
		/* Tells teardown paths that the binding is active. */
		cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
	}
	return (0);
fail:
	mptsas_pkt_destroy_extern(mpt, cmd);
	return (1);
}
4111 4117
4112 4118 /*
4113 4119 * deallocate external pkt space and deallocate the pkt
4114 4120 */
4115 4121 static void
4116 4122 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4117 4123 {
4118 4124 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4119 4125
4120 4126 if (cmd->cmd_flags & CFLAG_FREE) {
4121 4127 mptsas_log(mpt, CE_PANIC,
4122 4128 "mptsas_pkt_destroy_extern: freeing free packet");
4123 4129 _NOTE(NOT_REACHED)
4124 4130 /* NOTREACHED */
4125 4131 }
4126 4132 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4127 4133 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4128 4134 }
4129 4135 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4130 4136 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4131 4137 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4132 4138 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4133 4139 }
4134 4140 if (cmd->cmd_ext_arqhandle) {
4135 4141 ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
4136 4142 cmd->cmd_ext_arqhandle = NULL;
4137 4143 }
4138 4144 if (cmd->cmd_ext_arq_buf)
4139 4145 scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
4140 4146 }
4141 4147 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4142 4148 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4143 4149 }
4144 4150 cmd->cmd_flags = CFLAG_FREE;
4145 4151 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4146 4152 }
4147 4153
4148 4154 /*
4149 4155 * tran_sync_pkt(9E) - explicit DMA synchronization
4150 4156 */
4151 4157 /*ARGSUSED*/
4152 4158 static void
4153 4159 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4154 4160 {
4155 4161 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4156 4162
4157 4163 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4158 4164 ap->a_target, (void *)pkt));
4159 4165
4160 4166 if (cmd->cmd_dmahandle) {
4161 4167 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4162 4168 (cmd->cmd_flags & CFLAG_DMASEND) ?
4163 4169 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4164 4170 }
4165 4171 }
4166 4172
4167 4173 /*
4168 4174 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4169 4175 */
4170 4176 /*ARGSUSED*/
4171 4177 static void
4172 4178 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4173 4179 {
4174 4180 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4175 4181 mptsas_t *mpt = ADDR2MPT(ap);
4176 4182
4177 4183 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4178 4184 ap->a_target, (void *)pkt));
4179 4185
4180 4186 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4181 4187 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4182 4188 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4183 4189 }
4184 4190
4185 4191 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4186 4192 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4187 4193 cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4188 4194 }
4189 4195
4190 4196 mptsas_free_extra_sgl_frame(mpt, cmd);
4191 4197 }
4192 4198
4193 4199 static void
4194 4200 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4195 4201 {
4196 4202 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4197 4203 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4198 4204 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4199 4205 DDI_DMA_SYNC_FORCPU);
4200 4206 }
4201 4207 (*pkt->pkt_comp)(pkt);
4202 4208 }
4203 4209
4204 4210 static void
4205 4211 mptsas_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4206 4212 ddi_acc_handle_t acc_hdl, uint_t cookiec, uint32_t end_flags)
4207 4213 {
4208 4214 pMpi2SGESimple64_t sge;
4209 4215 mptti_t *dmap;
4210 4216 uint32_t flags;
4211 4217
4212 4218 dmap = cmd->cmd_sg;
4213 4219
4214 4220 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4215 4221 while (cookiec--) {
4216 4222 ddi_put32(acc_hdl,
4217 4223 &sge->Address.Low, dmap->addr.address64.Low);
4218 4224 ddi_put32(acc_hdl,
4219 4225 &sge->Address.High, dmap->addr.address64.High);
4220 4226 ddi_put32(acc_hdl, &sge->FlagsLength,
4221 4227 dmap->count);
4222 4228 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4223 4229 flags |= ((uint32_t)
4224 4230 (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4225 4231 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4226 4232 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4227 4233 MPI2_SGE_FLAGS_SHIFT);
4228 4234
4229 4235 /*
4230 4236 * If this is the last cookie, we set the flags
4231 4237 * to indicate so
4232 4238 */
4233 4239 if (cookiec == 0) {
4234 4240 flags |= end_flags;
4235 4241 }
4236 4242 if (cmd->cmd_flags & CFLAG_DMASEND) {
4237 4243 flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4238 4244 MPI2_SGE_FLAGS_SHIFT);
4239 4245 } else {
4240 4246 flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4241 4247 MPI2_SGE_FLAGS_SHIFT);
4242 4248 }
4243 4249 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4244 4250 dmap++;
4245 4251 sge++;
4246 4252 }
4247 4253 }
4248 4254
/*
 * Build a multi-frame MPI2 SGE scatter/gather list for a command whose
 * cookie count does not fit in the main request frame.  The main frame
 * is filled first, then a chain element there points the IOC at the
 * pre-allocated extra-frame buffer (cmd->cmd_extra_frames) where the
 * remaining simple elements - and any further chain elements - live.
 */
static void
mptsas_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2SGESimple64_t	sge;
	pMpi2SGEChain64_t	sgechain;
	uint_t			cookiec;
	mptti_t			*dmap;
	uint32_t		flags;

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    3 double-words (12 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Some restrictions:
	 * 1. For 64-bit DMA, the simple element and chain element
	 *    are both of 3 double-words (12 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 * 2. On some controllers (like the 1064/1068), a frame can
	 *    hold SGL elements with the last 1 or 2 double-words
	 *    (4 or 8 bytes) un-used. On these controllers, we should
	 *    recognize that there's not enough room for another SGL
	 *    element and move the sge pointer to the next frame.
	 */
	int			i, j, k, l, frames, sgemax;
	int			temp;
	uint8_t			chainflags;
	uint16_t		chainlength;
	mptsas_cache_frames_t	*p;

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reserved for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
	    - 1);
	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
	    sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	dmap = cmd->cmd_sg;
	sge = (pMpi2SGESimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
	mptsas_sge_mainframe(cmd, frame, acc_hdl, j,
	    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT));
	dmap += j;
	sge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of double-words (4
	 *    bytes) so the last value should be divided by 4.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
	sgechain = (pMpi2SGEChain64_t)sge;
	chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
	ddi_put8(acc_hdl, &sgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		ASSERT(mpt->m_req_frame_size >= sizeof (MPI2_SGE_SIMPLE64));
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_SGE_SIMPLE64) *
		    sizeof (MPI2_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_SGE_SIMPLE64));
	}

	/*
	 * NOTE(review): p is assumed non-NULL here; callers appear to
	 * allocate cmd_extra_frames before requesting a chained SGL -
	 * confirm against mptsas_alloc_extra_sgl_frame() callers.
	 */
	p = cmd->cmd_extra_frames;

	ddi_put16(acc_hdl, &sgechain->Length, chainlength);
	ddi_put32(acc_hdl, &sgechain->Address.Low,
	    p->m_phys_addr);
	/* SGL is allocated in the first 4G mem range */
	ddi_put32(acc_hdl, &sgechain->Address.High, 0);

	/*
	 * If there are more than 2 frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame. Note that the value NextChainOffset should be
	 * in double-words (4 bytes).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &sgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
	} else {
		ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	sge = (pMpi2SGESimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				sgechain = (pMpi2SGEChain64_t)sge;
				/*
				 * Re-process the current cookie in the
				 * next frame: this slot holds the chain
				 * element, not a data element.
				 */
				j--;
				chainflags = (
				    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
				ddi_put8(p->m_acc_hdl,
				    &sgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				ddi_put32(p->m_acc_hdl,
				    &sgechain->Address.Low,
				    (p->m_phys_addr +
				    (mpt->m_req_frame_size * k)));
				ddi_put32(p->m_acc_hdl,
				    &sgechain->Address.High, 0);

				/*
				 * If there are more than 2 frames left
				 * we have to next chain offset to
				 * the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &sgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_SGE_SIMPLE64))
					    >> 2);
					ddi_put16(p->m_acc_hdl,
					    &sgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_SGE_SIMPLE64) *
					    sizeof (MPI2_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &sgechain->NextChainOffset,
					    0);
					ddi_put16(p->m_acc_hdl,
					    &sgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				sge = (pMpi2SGESimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			ddi_put32(p->m_acc_hdl,
			    &sge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &sge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &sge->FlagsLength, dmap->count);
			flags = ddi_get32(p->m_acc_hdl,
			    &sge->FlagsLength);
			flags |= ((uint32_t)(
			    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
			    MPI2_SGE_FLAGS_SHIFT);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * we set the last simple element as last
			 * element
			 */
			if ((l == sgemax) && (k != frames)) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}

			/*
			 * If this is the final cookie we
			 * indicate it by setting the flags
			 */
			if (j == i) {
				flags |= ((uint32_t)
				    (MPI2_SGE_FLAGS_LAST_ELEMENT |
				    MPI2_SGE_FLAGS_END_OF_BUFFER |
				    MPI2_SGE_FLAGS_END_OF_LIST) <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			if (cmd->cmd_flags & CFLAG_DMASEND) {
				flags |=
				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
				    MPI2_SGE_FLAGS_SHIFT);
			} else {
				flags |=
				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
				    MPI2_SGE_FLAGS_SHIFT);
			}
			ddi_put32(p->m_acc_hdl,
			    &sge->FlagsLength, flags);
			dmap++;
			sge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
4548 4554
4549 4555 static void
4550 4556 mptsas_ieee_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4551 4557 ddi_acc_handle_t acc_hdl, uint_t cookiec, uint8_t end_flag)
4552 4558 {
4553 4559 pMpi2IeeeSgeSimple64_t ieeesge;
4554 4560 mptti_t *dmap;
4555 4561 uint8_t flags;
4556 4562
4557 4563 dmap = cmd->cmd_sg;
4558 4564
4559 4565 NDBG1(("mptsas_ieee_sge_mainframe: cookiec=%d, %s", cookiec,
4560 4566 cmd->cmd_flags & CFLAG_DMASEND?"Out":"In"));
4561 4567
4562 4568 ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
4563 4569 while (cookiec--) {
4564 4570 ddi_put32(acc_hdl,
4565 4571 &ieeesge->Address.Low, dmap->addr.address64.Low);
4566 4572 ddi_put32(acc_hdl,
4567 4573 &ieeesge->Address.High, dmap->addr.address64.High);
4568 4574 ddi_put32(acc_hdl, &ieeesge->Length,
4569 4575 dmap->count);
4570 4576 NDBG1(("mptsas_ieee_sge_mainframe: len=%d", dmap->count));
4571 4577 flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
4572 4578 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4573 4579
4574 4580 /*
4575 4581 * If this is the last cookie, we set the flags
4576 4582 * to indicate so
4577 4583 */
4578 4584 if (cookiec == 0) {
4579 4585 flags |= end_flag;
4580 4586 }
4581 4587
4582 4588 ddi_put8(acc_hdl, &ieeesge->Flags, flags);
4583 4589 dmap++;
4584 4590 ieeesge++;
4585 4591 }
4586 4592 }
4587 4593
/*
 * Build a chained IEEE SGL (MPI2.5 HBAs) for a command whose DMA cookie
 * count exceeds what fits in the main request frame.  The main frame gets
 * as many simple elements as fit plus one chain element; the remaining
 * elements are written into the per-command extra-frame buffer
 * (cmd->cmd_extra_frames), with a chain element at the end of every
 * extra frame except the last.
 */
static void
mptsas_ieee_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2IeeeSgeSimple64_t	ieeesge;
	pMpi25IeeeSgeChain64_t	ieeesgechain;
	uint_t			cookiec;
	mptti_t			*dmap;
	uint8_t			flags;

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_ieee_sge_chain: cookiec=%d", cookiec));

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    4 double-words (16 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Restrictions:
	 *    For 64-bit DMA, the simple element and chain element
	 *    are both of 4 double-words (16 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 */
	int i, j, k, l, frames, sgemax;
	int temp;
	uint8_t chainflags;
	uint32_t chainlength;
	mptsas_cache_frames_t *p;

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reseverd for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_IEEE_SGE_SIMPLE64))
	    - 1);
	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need.
	 * NOTE(review): the remainder test uses "> 1" rather than "> 0";
	 * this matches the MPI2 (non-IEEE) mptsas_sge_chain() variant, but
	 * looks like it could under-count by one frame when the remainder
	 * is exactly 1 -- confirm against the frame-allocation math.
	 */
	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
	    sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	NDBG1(("mptsas_ieee_sge_chain: temp=%d, frames=%d", temp, frames));
	dmap = cmd->cmd_sg;
	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
	mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl, j, 0);
	dmap += j;
	ieeesge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of quad-words (16
	 *    bytes) so the last value should be divided by 16.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
	chainflags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
	ddi_put8(acc_hdl, &ieeesgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		ASSERT(mpt->m_req_frame_size >=
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
	}

	p = cmd->cmd_extra_frames;

	ddi_put32(acc_hdl, &ieeesgechain->Length, chainlength);
	ddi_put32(acc_hdl, &ieeesgechain->Address.Low,
	    p->m_phys_addr);
	/* SGL is allocated in the first 4G mem range */
	ddi_put32(acc_hdl, &ieeesgechain->Address.High, 0);

	/*
	 * If there are more than 2 frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame.  Note that the value NextChainOffset should be
	 * in double-words (4 bytes).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	} else {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	ieeesge = (pMpi2IeeeSgeSimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
				/*
				 * The chain slot consumed this loop pass
				 * without consuming a cookie, so back j up
				 * one to re-process the current cookie in
				 * the next frame.
				 */
				j--;
				chainflags =
				    MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
				ddi_put8(p->m_acc_hdl,
				    &ieeesgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.Low,
				    (p->m_phys_addr +
				    (mpt->m_req_frame_size * k)));
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.High, 0);

				/*
				 * If there are more than 2 frames left
				 * we have to next chain offset to
				 * the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64))
					    >> 4);
					ASSERT(mpt->m_req_frame_size >=
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    0);
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				ieeesge = (pMpi2IeeeSgeSimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Length, dmap->count);
			flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * do we need to do anything?
			 * if ((l == sgemax) && (k != frames)) {
			 * }
			 */

			/*
			 * If this is the final cookie set end of list.
			 */
			if (j == i) {
				flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
			}

			ddi_put8(p->m_acc_hdl, &ieeesge->Flags, flags);
			dmap++;
			ieeesge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
4861 4867
4862 4868 static void
4863 4869 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4864 4870 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4865 4871 {
4866 4872 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4867 4873
4868 4874 NDBG1(("mptsas_sge_setup: cookiec=%d", cmd->cmd_cookiec));
4869 4875
4870 4876 /*
4871 4877 * Set read/write bit in control.
4872 4878 */
4873 4879 if (cmd->cmd_flags & CFLAG_DMASEND) {
4874 4880 *control |= MPI2_SCSIIO_CONTROL_WRITE;
4875 4881 } else {
4876 4882 *control |= MPI2_SCSIIO_CONTROL_READ;
4877 4883 }
4878 4884
4879 4885 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4880 4886
4881 4887 /*
4882 4888 * We have 4 cases here. First where we can fit all the
4883 4889 * SG elements into the main frame, and the case
4884 4890 * where we can't. The SG element is also different when using
4885 4891 * MPI2.5 interface.
4886 4892 * If we have more cookies than we can attach to a frame
4887 4893 * we will need to use a chain element to point
4888 4894 * a location of memory where the rest of the S/G
4889 4895 * elements reside.
4890 4896 */
4891 4897 if (cmd->cmd_cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4892 4898 if (mpt->m_MPI25) {
4893 4899 mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl,
4894 4900 cmd->cmd_cookiec,
4895 4901 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
4896 4902 } else {
4897 4903 mptsas_sge_mainframe(cmd, frame, acc_hdl,
4898 4904 cmd->cmd_cookiec,
4899 4905 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4900 4906 | MPI2_SGE_FLAGS_END_OF_BUFFER
4901 4907 | MPI2_SGE_FLAGS_END_OF_LIST) <<
4902 4908 MPI2_SGE_FLAGS_SHIFT));
4903 4909 }
4904 4910 } else {
4905 4911 if (mpt->m_MPI25) {
4906 4912 mptsas_ieee_sge_chain(mpt, cmd, frame, acc_hdl);
4907 4913 } else {
4908 4914 mptsas_sge_chain(mpt, cmd, frame, acc_hdl);
4909 4915 }
4910 4916 }
4911 4917 }
4912 4918
4913 4919 /*
4914 4920 * Interrupt handling
4915 4921 * Utility routine. Poll for status of a command sent to HBA
4916 4922 * without interrupts (a FLAG_NOINTR command).
4917 4923 */
4918 4924 int
4919 4925 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4920 4926 {
4921 4927 int rval = TRUE;
4922 4928
4923 4929 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4924 4930
4925 4931 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4926 4932 mptsas_restart_hba(mpt);
4927 4933 }
4928 4934
4929 4935 /*
4930 4936 * Wait, using drv_usecwait(), long enough for the command to
4931 4937 * reasonably return from the target if the target isn't
4932 4938 * "dead". A polled command may well be sent from scsi_poll, and
4933 4939 * there are retries built in to scsi_poll if the transport
4934 4940 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
4935 4941 * and retries the transport up to scsi_poll_busycnt times
4936 4942 * (currently 60) if
4937 4943 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4938 4944 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
4939 4945 *
4940 4946 * limit the waiting to avoid a hang in the event that the
4941 4947 * cmd never gets started but we are still receiving interrupts
4942 4948 */
4943 4949 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
4944 4950 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
4945 4951 NDBG5(("mptsas_poll: command incomplete"));
4946 4952 rval = FALSE;
4947 4953 break;
4948 4954 }
4949 4955 }
4950 4956
4951 4957 if (rval == FALSE) {
4952 4958
4953 4959 /*
4954 4960 * this isn't supposed to happen, the hba must be wedged
4955 4961 * Mark this cmd as a timeout.
4956 4962 */
4957 4963 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
4958 4964 (STAT_TIMEOUT|STAT_ABORTED));
4959 4965
4960 4966 if (poll_cmd->cmd_queued == FALSE) {
4961 4967
4962 4968 NDBG5(("mptsas_poll: not on waitq"));
4963 4969
4964 4970 poll_cmd->cmd_pkt->pkt_state |=
4965 4971 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
4966 4972 } else {
4967 4973
4968 4974 /* find and remove it from the waitq */
4969 4975 NDBG5(("mptsas_poll: delete from waitq"));
4970 4976 mptsas_waitq_delete(mpt, poll_cmd);
4971 4977 }
4972 4978
4973 4979 }
4974 4980 mptsas_fma_check(mpt, poll_cmd);
4975 4981 NDBG5(("mptsas_poll: done"));
4976 4982 return (rval);
4977 4983 }
4978 4984
4979 4985 /*
4980 4986 * Used for polling cmds and TM function
4981 4987 */
4982 4988 static int
4983 4989 mptsas_wait_intr(mptsas_t *mpt, int polltime)
4984 4990 {
4985 4991 int cnt;
4986 4992 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
4987 4993 uint32_t int_mask;
4988 4994
4989 4995 NDBG5(("mptsas_wait_intr"));
4990 4996
4991 4997 mpt->m_polled_intr = 1;
4992 4998
4993 4999 /*
4994 5000 * Get the current interrupt mask and disable interrupts. When
4995 5001 * re-enabling ints, set mask to saved value.
4996 5002 */
4997 5003 int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
4998 5004 MPTSAS_DISABLE_INTR(mpt);
4999 5005
5000 5006 /*
5001 5007 * Keep polling for at least (polltime * 1000) seconds
5002 5008 */
5003 5009 for (cnt = 0; cnt < polltime; cnt++) {
5004 5010 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5005 5011 DDI_DMA_SYNC_FORCPU);
5006 5012
5007 5013 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5008 5014 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5009 5015
5010 5016 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5011 5017 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5012 5018 ddi_get32(mpt->m_acc_post_queue_hdl,
5013 5019 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5014 5020 drv_usecwait(1000);
5015 5021 continue;
5016 5022 }
5017 5023
5018 5024 /*
5019 5025 * The reply is valid, process it according to its
5020 5026 * type.
5021 5027 */
5022 5028 mptsas_process_intr(mpt, reply_desc_union);
5023 5029
5024 5030 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5025 5031 mpt->m_post_index = 0;
5026 5032 }
5027 5033
5028 5034 /*
5029 5035 * Update the global reply index
5030 5036 */
5031 5037 ddi_put32(mpt->m_datap,
5032 5038 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5033 5039 mpt->m_polled_intr = 0;
5034 5040
5035 5041 /*
5036 5042 * Re-enable interrupts and quit.
5037 5043 */
5038 5044 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
5039 5045 int_mask);
5040 5046 return (TRUE);
5041 5047
5042 5048 }
5043 5049
5044 5050 /*
5045 5051 * Clear polling flag, re-enable interrupts and quit.
5046 5052 */
5047 5053 mpt->m_polled_intr = 0;
5048 5054 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
5049 5055 return (FALSE);
5050 5056 }
5051 5057
/*
 * Handle a SCSI IO success reply descriptor: validate the SMID, look up
 * the command in the active-slot table and complete it with good status.
 * Called from interrupt context with m_mutex held.
 */
static void
mptsas_handle_scsi_io_success(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2SCSIIOSuccessReplyDescriptor_t	scsi_io_success;
	uint16_t				SMID;
	mptsas_slots_t				*slots = mpt->m_active;
	mptsas_cmd_t				*cmd = NULL;
	struct scsi_pkt				*pkt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);

	/*
	 * This is a success reply so just complete the IO.  First, do a sanity
	 * check on the SMID.  The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_normal)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return;
	}

	/* Mark the packet fully transported with good status. */
	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/*
	 * Passthrough commands are completed by waking their waiter;
	 * everything else is pulled out of the active slot table.
	 */
	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_broadcast(&mpt->m_passthru_cv);
		return;
	} else {
		mptsas_remove_cmd(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5121 5127
5122 5128 static void
5123 5129 mptsas_handle_address_reply(mptsas_t *mpt,
5124 5130 pMpi2ReplyDescriptorsUnion_t reply_desc)
5125 5131 {
5126 5132 pMpi2AddressReplyDescriptor_t address_reply;
5127 5133 pMPI2DefaultReply_t reply;
5128 5134 mptsas_fw_diagnostic_buffer_t *pBuffer;
5129 5135 uint32_t reply_addr;
5130 5136 uint16_t SMID, iocstatus;
5131 5137 mptsas_slots_t *slots = mpt->m_active;
5132 5138 mptsas_cmd_t *cmd = NULL;
5133 5139 uint8_t function, buffer_type;
5134 5140 m_replyh_arg_t *args;
5135 5141 int reply_frame_no;
5136 5142
5137 5143 ASSERT(mutex_owned(&mpt->m_mutex));
5138 5144
5139 5145 address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
5140 5146 reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
5141 5147 &address_reply->ReplyFrameAddress);
5142 5148 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
5143 5149
5144 5150 /*
5145 5151 * If reply frame is not in the proper range we should ignore this
5146 5152 * message and exit the interrupt handler.
5147 5153 */
5148 5154 if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
5149 5155 (reply_addr >= (mpt->m_reply_frame_dma_addr +
5150 5156 (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
5151 5157 ((reply_addr - mpt->m_reply_frame_dma_addr) %
5152 5158 mpt->m_reply_frame_size != 0)) {
5153 5159 mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
5154 5160 "address 0x%x\n", reply_addr);
5155 5161 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5156 5162 return;
5157 5163 }
5158 5164
5159 5165 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
5160 5166 DDI_DMA_SYNC_FORCPU);
5161 5167 reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
5162 5168 mpt->m_reply_frame_dma_addr));
5163 5169 function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
5164 5170
5165 5171 NDBG31(("mptsas_handle_address_reply: function 0x%x, reply_addr=0x%x",
5166 5172 function, reply_addr));
5167 5173
5168 5174 /*
5169 5175 * don't get slot information and command for events since these values
5170 5176 * don't exist
5171 5177 */
5172 5178 if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
5173 5179 (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
5174 5180 /*
5175 5181 * This could be a TM reply, which use the last allocated SMID,
5176 5182 * so allow for that.
5177 5183 */
5178 5184 if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
5179 5185 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
5180 5186 "%d\n", SMID);
5181 5187 ddi_fm_service_impact(mpt->m_dip,
5182 5188 DDI_SERVICE_UNAFFECTED);
5183 5189 return;
5184 5190 }
5185 5191
5186 5192 cmd = slots->m_slot[SMID];
5187 5193
5188 5194 /*
5189 5195 * print warning and return if the slot is empty
5190 5196 */
5191 5197 if (cmd == NULL) {
5192 5198 mptsas_log(mpt, CE_WARN, "?NULL command for address "
5193 5199 "reply in slot %d", SMID);
5194 5200 return;
5195 5201 }
5196 5202 if ((cmd->cmd_flags &
5197 5203 (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
5198 5204 cmd->cmd_rfm = reply_addr;
5199 5205 cmd->cmd_flags |= CFLAG_FINISHED;
5200 5206 cv_broadcast(&mpt->m_passthru_cv);
5201 5207 cv_broadcast(&mpt->m_config_cv);
5202 5208 cv_broadcast(&mpt->m_fw_diag_cv);
5203 5209 return;
5204 5210 } else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
5205 5211 mptsas_remove_cmd(mpt, cmd);
5206 5212 }
5207 5213 NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
5208 5214 }
5209 5215 /*
5210 5216 * Depending on the function, we need to handle
5211 5217 * the reply frame (and cmd) differently.
5212 5218 */
5213 5219 switch (function) {
5214 5220 case MPI2_FUNCTION_SCSI_IO_REQUEST:
5215 5221 mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
5216 5222 break;
5217 5223 case MPI2_FUNCTION_SCSI_TASK_MGMT:
5218 5224 cmd->cmd_rfm = reply_addr;
5219 5225 mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
5220 5226 cmd);
5221 5227 break;
5222 5228 case MPI2_FUNCTION_FW_DOWNLOAD:
5223 5229 cmd->cmd_flags |= CFLAG_FINISHED;
5224 5230 cv_signal(&mpt->m_fw_cv);
5225 5231 break;
5226 5232 case MPI2_FUNCTION_EVENT_NOTIFICATION:
5227 5233 reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
5228 5234 mpt->m_reply_frame_size;
5229 5235 args = &mpt->m_replyh_args[reply_frame_no];
5230 5236 args->mpt = (void *)mpt;
5231 5237 args->rfm = reply_addr;
5232 5238
5233 5239 /*
5234 5240 * Record the event if its type is enabled in
5235 5241 * this mpt instance by ioctl.
5236 5242 */
5237 5243 mptsas_record_event(args);
5238 5244
5239 5245 /*
5240 5246 * Handle time critical events
5241 5247 * NOT_RESPONDING/ADDED only now
5242 5248 */
5243 5249 if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
5244 5250 /*
5245 5251 * Would not return main process,
5246 5252 * just let taskq resolve ack action
5247 5253 * and ack would be sent in taskq thread
5248 5254 */
5249 5255 NDBG20(("send mptsas_handle_event_sync success"));
5250 5256 }
5251 5257
5252 5258 if (mpt->m_in_reset) {
5253 5259 NDBG20(("dropping event received during reset"));
5254 5260 return;
5255 5261 }
5256 5262
5257 5263 if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
5258 5264 (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
5259 5265 mptsas_log(mpt, CE_WARN, "No memory available"
5260 5266 "for dispatch taskq");
5261 5267 /*
5262 5268 * Return the reply frame to the free queue.
5263 5269 */
5264 5270 ddi_put32(mpt->m_acc_free_queue_hdl,
5265 5271 &((uint32_t *)(void *)
5266 5272 mpt->m_free_queue)[mpt->m_free_index], reply_addr);
5267 5273 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5268 5274 DDI_DMA_SYNC_FORDEV);
5269 5275 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5270 5276 mpt->m_free_index = 0;
5271 5277 }
5272 5278
5273 5279 ddi_put32(mpt->m_datap,
5274 5280 &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
5275 5281 }
5276 5282 return;
5277 5283 case MPI2_FUNCTION_DIAG_BUFFER_POST:
5278 5284 /*
5279 5285 * If SMID is 0, this implies that the reply is due to a
5280 5286 * release function with a status that the buffer has been
5281 5287 * released. Set the buffer flags accordingly.
5282 5288 */
5283 5289 if (SMID == 0) {
5284 5290 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
5285 5291 &reply->IOCStatus);
5286 5292 buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
5287 5293 &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
5288 5294 if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
5289 5295 pBuffer =
5290 5296 &mpt->m_fw_diag_buffer_list[buffer_type];
5291 5297 pBuffer->valid_data = TRUE;
5292 5298 pBuffer->owned_by_firmware = FALSE;
5293 5299 pBuffer->immediate = FALSE;
5294 5300 }
5295 5301 } else {
5296 5302 /*
5297 5303 * Normal handling of diag post reply with SMID.
5298 5304 */
5299 5305 cmd = slots->m_slot[SMID];
5300 5306
5301 5307 /*
5302 5308 * print warning and return if the slot is empty
5303 5309 */
5304 5310 if (cmd == NULL) {
5305 5311 mptsas_log(mpt, CE_WARN, "?NULL command for "
5306 5312 "address reply in slot %d", SMID);
5307 5313 return;
5308 5314 }
5309 5315 cmd->cmd_rfm = reply_addr;
5310 5316 cmd->cmd_flags |= CFLAG_FINISHED;
5311 5317 cv_broadcast(&mpt->m_fw_diag_cv);
5312 5318 }
5313 5319 return;
5314 5320 default:
5315 5321 mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
5316 5322 break;
5317 5323 }
5318 5324
5319 5325 /*
5320 5326 * Return the reply frame to the free queue.
5321 5327 */
5322 5328 ddi_put32(mpt->m_acc_free_queue_hdl,
5323 5329 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
5324 5330 reply_addr);
5325 5331 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5326 5332 DDI_DMA_SYNC_FORDEV);
5327 5333 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5328 5334 mpt->m_free_index = 0;
5329 5335 }
5330 5336 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
5331 5337 mpt->m_free_index);
5332 5338
5333 5339 if (cmd->cmd_flags & CFLAG_FW_CMD)
5334 5340 return;
5335 5341
5336 5342 if (cmd->cmd_flags & CFLAG_RETRY) {
5337 5343 /*
5338 5344 * The target returned QFULL or busy, do not add this
5339 5345 * pkt to the doneq since the hba will retry
5340 5346 * this cmd.
5341 5347 *
5342 5348 * The pkt has already been resubmitted in
5343 5349 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
5344 5350 * Remove this cmd_flag here.
5345 5351 */
5346 5352 cmd->cmd_flags &= ~CFLAG_RETRY;
5347 5353 } else {
5348 5354 mptsas_doneq_add(mpt, cmd);
5349 5355 }
5350 5356 }
5351 5357
5352 5358 static void
5353 5359 mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
5354 5360 mptsas_cmd_t *cmd)
5355 5361 {
5356 5362 uint8_t scsi_status, scsi_state;
5357 5363 uint16_t ioc_status;
5358 5364 uint32_t xferred, sensecount, responsedata, loginfo = 0;
5359 5365 struct scsi_pkt *pkt;
5360 5366 struct scsi_arq_status *arqstat;
5361 5367 struct buf *bp;
5362 5368 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5363 5369 uint8_t *sensedata = NULL;
5364 5370 uint64_t sas_wwn;
5365 5371 uint8_t phy;
5366 5372 char wwn_str[MPTSAS_WWN_STRLEN];
5367 5373
5368 5374 if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
5369 5375 (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
5370 5376 bp = cmd->cmd_ext_arq_buf;
5371 5377 } else {
5372 5378 bp = cmd->cmd_arq_buf;
5373 5379 }
5374 5380
5375 5381 scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
5376 5382 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5377 5383 scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
5378 5384 xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
5379 5385 sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
5380 5386 responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
5381 5387 &reply->ResponseInfo);
5382 5388
5383 5389 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
5384 5390 sas_wwn = ptgt->m_addr.mta_wwn;
5385 5391 phy = ptgt->m_phynum;
5386 5392 if (sas_wwn == 0) {
5387 5393 (void) sprintf(wwn_str, "p%x", phy);
5388 5394 } else {
5389 5395 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
5390 5396 }
5391 5397 loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
5392 5398 &reply->IOCLogInfo);
5393 5399 mptsas_log(mpt, CE_NOTE,
5394 5400 "?Log info 0x%x received for target %d %s.\n"
5395 5401 "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5396 5402 loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
5397 5403 scsi_state);
5398 5404 }
5399 5405
5400 5406 NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5401 5407 scsi_status, ioc_status, scsi_state));
5402 5408
5403 5409 pkt = CMD2PKT(cmd);
5404 5410 *(pkt->pkt_scbp) = scsi_status;
5405 5411
5406 5412 if (loginfo == 0x31170000) {
5407 5413 /*
5408 5414 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5409 5415 * 0x31170000 comes, that means the device missing delay
5410 5416 * is in progressing, the command need retry later.
5411 5417 */
5412 5418 *(pkt->pkt_scbp) = STATUS_BUSY;
5413 5419 return;
5414 5420 }
5415 5421
5416 5422 if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
5417 5423 ((ioc_status & MPI2_IOCSTATUS_MASK) ==
5418 5424 MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
5419 5425 pkt->pkt_reason = CMD_INCOMPLETE;
5420 5426 pkt->pkt_state |= STATE_GOT_BUS;
5421 5427 if (ptgt->m_reset_delay == 0) {
5422 5428 mptsas_set_throttle(mpt, ptgt,
5423 5429 DRAIN_THROTTLE);
5424 5430 }
5425 5431 return;
5426 5432 }
5427 5433
5428 5434 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5429 5435 responsedata &= 0x000000FF;
5430 5436 if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5431 5437 mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
5432 5438 pkt->pkt_reason = CMD_TLR_OFF;
5433 5439 return;
5434 5440 }
5435 5441 }
5436 5442
5437 5443
5438 5444 switch (scsi_status) {
5439 5445 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5440 5446 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5441 5447 arqstat = (void*)(pkt->pkt_scbp);
5442 5448 arqstat->sts_rqpkt_status = *((struct scsi_status *)
5443 5449 (pkt->pkt_scbp));
5444 5450 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
5445 5451 STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
5446 5452 if (cmd->cmd_flags & CFLAG_XARQ) {
5447 5453 pkt->pkt_state |= STATE_XARQ_DONE;
5448 5454 }
5449 5455 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5450 5456 pkt->pkt_state |= STATE_XFERRED_DATA;
5451 5457 }
5452 5458 arqstat->sts_rqpkt_reason = pkt->pkt_reason;
5453 5459 arqstat->sts_rqpkt_state = pkt->pkt_state;
5454 5460 arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
5455 5461 arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
5456 5462 sensedata = (uint8_t *)&arqstat->sts_sensedata;
5457 5463
5458 5464 bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
5459 5465 ((cmd->cmd_rqslen >= sensecount) ? sensecount :
5460 5466 cmd->cmd_rqslen));
5461 5467 arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
5462 5468 cmd->cmd_flags |= CFLAG_CMDARQ;
5463 5469 /*
5464 5470 * Set proper status for pkt if autosense was valid
5465 5471 */
5466 5472 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5467 5473 struct scsi_status zero_status = { 0 };
5468 5474 arqstat->sts_rqpkt_status = zero_status;
5469 5475 }
5470 5476
5471 5477 /*
5472 5478 * ASC=0x47 is parity error
5473 5479 * ASC=0x48 is initiator detected error received
5474 5480 */
5475 5481 if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
5476 5482 ((scsi_sense_asc(sensedata) == 0x47) ||
5477 5483 (scsi_sense_asc(sensedata) == 0x48))) {
5478 5484 mptsas_log(mpt, CE_NOTE, "Aborted_command!");
5479 5485 }
5480 5486
5481 5487 /*
5482 5488 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5483 5489 * ASC/ASCQ=0x25/0x00 means invalid lun
5484 5490 */
5485 5491 if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
5486 5492 (scsi_sense_asc(sensedata) == 0x3F) &&
5487 5493 (scsi_sense_ascq(sensedata) == 0x0E)) ||
5488 5494 ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
5489 5495 (scsi_sense_asc(sensedata) == 0x25) &&
5490 5496 (scsi_sense_ascq(sensedata) == 0x00))) {
5491 5497 mptsas_topo_change_list_t *topo_node = NULL;
5492 5498
5493 5499 topo_node = kmem_zalloc(
5494 5500 sizeof (mptsas_topo_change_list_t),
5495 5501 KM_NOSLEEP);
5496 5502 if (topo_node == NULL) {
5497 5503 mptsas_log(mpt, CE_NOTE, "No memory"
5498 5504 "resource for handle SAS dynamic"
5499 5505 "reconfigure.\n");
5500 5506 break;
5501 5507 }
5502 5508 topo_node->mpt = mpt;
5503 5509 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
5504 5510 topo_node->un.phymask = ptgt->m_addr.mta_phymask;
5505 5511 topo_node->devhdl = ptgt->m_devhdl;
5506 5512 topo_node->object = (void *)ptgt;
5507 5513 topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5508 5514
5509 5515 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5510 5516 mptsas_handle_dr,
5511 5517 (void *)topo_node,
5512 5518 DDI_NOSLEEP)) != DDI_SUCCESS) {
5513 5519 kmem_free(topo_node,
5514 5520 sizeof (mptsas_topo_change_list_t));
5515 5521 mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
5516 5522 "for handle SAS dynamic reconfigure"
5517 5523 "failed. \n");
5518 5524 }
5519 5525 }
5520 5526 break;
5521 5527 case MPI2_SCSI_STATUS_GOOD:
5522 5528 switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5523 5529 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5524 5530 pkt->pkt_reason = CMD_DEV_GONE;
5525 5531 pkt->pkt_state |= STATE_GOT_BUS;
5526 5532 if (ptgt->m_reset_delay == 0) {
5527 5533 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5528 5534 }
5529 5535 NDBG31(("lost disk for target%d, command:%x",
5530 5536 Tgt(cmd), pkt->pkt_cdbp[0]));
5531 5537 break;
5532 5538 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5533 5539 NDBG31(("data overrun: xferred=%d", xferred));
5534 5540 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5535 5541 pkt->pkt_reason = CMD_DATA_OVR;
5536 5542 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5537 5543 | STATE_SENT_CMD | STATE_GOT_STATUS
5538 5544 | STATE_XFERRED_DATA);
5539 5545 pkt->pkt_resid = 0;
5540 5546 break;
5541 5547 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5542 5548 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5543 5549 NDBG31(("data underrun: xferred=%d", xferred));
5544 5550 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5545 5551 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5546 5552 | STATE_SENT_CMD | STATE_GOT_STATUS);
5547 5553 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5548 5554 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5549 5555 pkt->pkt_state |= STATE_XFERRED_DATA;
5550 5556 }
5551 5557 break;
5552 5558 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5553 5559 if (cmd->cmd_active_expiration <= gethrtime()) {
5554 5560 /*
5555 5561 * When timeout requested, propagate
5556 5562 * proper reason and statistics to
5557 5563 * target drivers.
5558 5564 */
5559 5565 mptsas_set_pkt_reason(mpt, cmd, CMD_TIMEOUT,
5560 5566 STAT_BUS_RESET | STAT_TIMEOUT);
5561 5567 } else {
5562 5568 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
5563 5569 STAT_BUS_RESET);
5564 5570 }
5565 5571 break;
5566 5572 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5567 5573 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5568 5574 mptsas_set_pkt_reason(mpt,
5569 5575 cmd, CMD_RESET, STAT_DEV_RESET);
5570 5576 break;
5571 5577 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5572 5578 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5573 5579 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5574 5580 mptsas_set_pkt_reason(mpt,
5575 5581 cmd, CMD_TERMINATED, STAT_TERMINATED);
5576 5582 break;
5577 5583 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5578 5584 case MPI2_IOCSTATUS_BUSY:
5579 5585 /*
5580 5586 * set throttles to drain
5581 5587 */
5582 5588 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
5583 5589 ptgt = refhash_next(mpt->m_targets, ptgt)) {
5584 5590 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5585 5591 }
5586 5592
5587 5593 /*
5588 5594 * retry command
5589 5595 */
5590 5596 cmd->cmd_flags |= CFLAG_RETRY;
5591 5597 cmd->cmd_pkt_flags |= FLAG_HEAD;
5592 5598
5593 5599 (void) mptsas_accept_pkt(mpt, cmd);
5594 5600 break;
5595 5601 default:
5596 5602 mptsas_log(mpt, CE_WARN,
5597 5603 "unknown ioc_status = %x\n", ioc_status);
5598 5604 mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5599 5605 "count = %x, scsi_status = %x", scsi_state,
5600 5606 xferred, scsi_status);
5601 5607 break;
5602 5608 }
5603 5609 break;
5604 5610 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5605 5611 mptsas_handle_qfull(mpt, cmd);
5606 5612 break;
5607 5613 case MPI2_SCSI_STATUS_BUSY:
5608 5614 NDBG31(("scsi_status busy received"));
5609 5615 break;
5610 5616 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5611 5617 NDBG31(("scsi_status reservation conflict received"));
5612 5618 break;
5613 5619 default:
5614 5620 mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
5615 5621 scsi_status, ioc_status);
5616 5622 mptsas_log(mpt, CE_WARN,
5617 5623 "mptsas_process_intr: invalid scsi status\n");
5618 5624 break;
5619 5625 }
5620 5626 }
5621 5627
5622 5628 static void
5623 5629 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5624 5630 mptsas_cmd_t *cmd)
5625 5631 {
5626 5632 uint8_t task_type;
5627 5633 uint16_t ioc_status;
5628 5634 uint32_t log_info;
5629 5635 uint16_t dev_handle;
5630 5636 struct scsi_pkt *pkt = CMD2PKT(cmd);
5631 5637
5632 5638 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5633 5639 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5634 5640 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5635 5641 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5636 5642
5637 5643 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5638 5644 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5639 5645 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5640 5646 task_type, ioc_status, log_info, dev_handle);
5641 5647 pkt->pkt_reason = CMD_INCOMPLETE;
5642 5648 return;
5643 5649 }
5644 5650
5645 5651 switch (task_type) {
5646 5652 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5647 5653 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5648 5654 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5649 5655 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5650 5656 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5651 5657 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5652 5658 break;
5653 5659 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5654 5660 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5655 5661 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5656 5662 /*
5657 5663 * Check for invalid DevHandle of 0 in case application
5658 5664 * sends bad command. DevHandle of 0 could cause problems.
5659 5665 */
5660 5666 if (dev_handle == 0) {
5661 5667 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5662 5668 " DevHandle of 0.");
5663 5669 } else {
5664 5670 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5665 5671 task_type);
5666 5672 }
5667 5673 break;
5668 5674 default:
5669 5675 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5670 5676 task_type);
5671 5677 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5672 5678 break;
5673 5679 }
5674 5680 }
5675 5681
/*
 * Helper-thread body that completes commands handed off from the
 * interrupt path.  Each helper services its own per-thread done queue
 * entry (mpt->m_doneq_thread_id[t]) until the ACTIVE flag is cleared,
 * then decrements the global helper count and wakes any thread waiting
 * for the helpers to exit.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t			*mpt = arg->mpt;
	uint64_t			t = arg->t;
	mptsas_cmd_t			*cmd;
	struct scsi_pkt			*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		/* Sleep until work is queued (or the flag changes). */
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Drop the list mutex while calling back into the target
		 * driver's completion routine.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Thread is exiting: update the count and notify any waiter. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5707 5713
5708 5714
5709 5715 /*
5710 5716 * mpt interrupt handler.
5711 5717 */
5712 5718 static uint_t
5713 5719 mptsas_intr(caddr_t arg1, caddr_t arg2)
5714 5720 {
5715 5721 mptsas_t *mpt = (void *)arg1;
5716 5722 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5717 5723 uchar_t did_reply = FALSE;
5718 5724
5719 5725 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5720 5726
5721 5727 mutex_enter(&mpt->m_mutex);
5722 5728
5723 5729 /*
5724 5730 * If interrupts are shared by two channels then check whether this
5725 5731 * interrupt is genuinely for this channel by making sure first the
5726 5732 * chip is in high power state.
5727 5733 */
5728 5734 if ((mpt->m_options & MPTSAS_OPT_PM) &&
5729 5735 (mpt->m_power_level != PM_LEVEL_D0)) {
5730 5736 mutex_exit(&mpt->m_mutex);
5731 5737 return (DDI_INTR_UNCLAIMED);
5732 5738 }
5733 5739
5734 5740 /*
5735 5741 * If polling, interrupt was triggered by some shared interrupt because
5736 5742 * IOC interrupts are disabled during polling, so polling routine will
5737 5743 * handle any replies. Considering this, if polling is happening,
5738 5744 * return with interrupt unclaimed.
5739 5745 */
5740 5746 if (mpt->m_polled_intr) {
5741 5747 mutex_exit(&mpt->m_mutex);
5742 5748 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5743 5749 return (DDI_INTR_UNCLAIMED);
5744 5750 }
5745 5751
5746 5752 /*
5747 5753 * Read the istat register.
5748 5754 */
5749 5755 if ((INTPENDING(mpt)) != 0) {
5750 5756 /*
5751 5757 * read fifo until empty.
5752 5758 */
5753 5759 #ifndef __lock_lint
5754 5760 _NOTE(CONSTCOND)
5755 5761 #endif
5756 5762 while (TRUE) {
5757 5763 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5758 5764 DDI_DMA_SYNC_FORCPU);
5759 5765 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5760 5766 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5761 5767
5762 5768 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5763 5769 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5764 5770 ddi_get32(mpt->m_acc_post_queue_hdl,
5765 5771 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5766 5772 break;
5767 5773 }
5768 5774
5769 5775 /*
5770 5776 * The reply is valid, process it according to its
5771 5777 * type. Also, set a flag for updating the reply index
5772 5778 * after they've all been processed.
5773 5779 */
5774 5780 did_reply = TRUE;
5775 5781
5776 5782 mptsas_process_intr(mpt, reply_desc_union);
5777 5783
5778 5784 /*
5779 5785 * Increment post index and roll over if needed.
5780 5786 */
5781 5787 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5782 5788 mpt->m_post_index = 0;
5783 5789 }
5784 5790 }
5785 5791
5786 5792 /*
5787 5793 * Update the global reply index if at least one reply was
5788 5794 * processed.
5789 5795 */
5790 5796 if (did_reply) {
5791 5797 ddi_put32(mpt->m_datap,
5792 5798 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5793 5799 }
5794 5800 } else {
5795 5801 mutex_exit(&mpt->m_mutex);
5796 5802 return (DDI_INTR_UNCLAIMED);
5797 5803 }
5798 5804 NDBG1(("mptsas_intr complete"));
5799 5805
5800 5806 /*
5801 5807 * If no helper threads are created, process the doneq in ISR. If
5802 5808 * helpers are created, use the doneq length as a metric to measure the
5803 5809 * load on the interrupt CPU. If it is long enough, which indicates the
5804 5810 * load is heavy, then we deliver the IO completions to the helpers.
5805 5811 * This measurement has some limitations, although it is simple and
5806 5812 * straightforward and works well for most of the cases at present.
5807 5813 */
5808 5814 if (!mpt->m_doneq_thread_n ||
5809 5815 (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5810 5816 mptsas_doneq_empty(mpt);
5811 5817 } else {
5812 5818 mptsas_deliver_doneq_thread(mpt);
5813 5819 }
5814 5820
5815 5821 /*
5816 5822 * If there are queued cmd, start them now.
5817 5823 */
5818 5824 if (mpt->m_waitq != NULL) {
5819 5825 mptsas_restart_waitq(mpt);
5820 5826 }
5821 5827
5822 5828 mutex_exit(&mpt->m_mutex);
5823 5829 return (DDI_INTR_CLAIMED);
5824 5830 }
5825 5831
5826 5832 static void
5827 5833 mptsas_process_intr(mptsas_t *mpt,
5828 5834 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5829 5835 {
5830 5836 uint8_t reply_type;
5831 5837
5832 5838 ASSERT(mutex_owned(&mpt->m_mutex));
5833 5839
5834 5840 /*
5835 5841 * The reply is valid, process it according to its
5836 5842 * type. Also, set a flag for updated the reply index
5837 5843 * after they've all been processed.
5838 5844 */
5839 5845 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5840 5846 &reply_desc_union->Default.ReplyFlags);
5841 5847 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5842 5848 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
5843 5849 reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
5844 5850 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5845 5851 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5846 5852 mptsas_handle_address_reply(mpt, reply_desc_union);
5847 5853 } else {
5848 5854 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5849 5855 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5850 5856 }
5851 5857
5852 5858 /*
5853 5859 * Clear the reply descriptor for re-use and increment
5854 5860 * index.
5855 5861 */
5856 5862 ddi_put64(mpt->m_acc_post_queue_hdl,
5857 5863 &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
5858 5864 0xFFFFFFFFFFFFFFFF);
5859 5865 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5860 5866 DDI_DMA_SYNC_FORDEV);
5861 5867 }
5862 5868
5863 5869 /*
5864 5870 * handle qfull condition
5865 5871 */
5866 5872 static void
5867 5873 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5868 5874 {
5869 5875 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5870 5876
5871 5877 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5872 5878 (ptgt->m_qfull_retries == 0)) {
5873 5879 /*
5874 5880 * We have exhausted the retries on QFULL, or,
5875 5881 * the target driver has indicated that it
5876 5882 * wants to handle QFULL itself by setting
5877 5883 * qfull-retries capability to 0. In either case
5878 5884 * we want the target driver's QFULL handling
5879 5885 * to kick in. We do this by having pkt_reason
5880 5886 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5881 5887 */
5882 5888 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5883 5889 } else {
5884 5890 if (ptgt->m_reset_delay == 0) {
5885 5891 ptgt->m_t_throttle =
5886 5892 max((ptgt->m_t_ncmds - 2), 0);
5887 5893 }
5888 5894
5889 5895 cmd->cmd_pkt_flags |= FLAG_HEAD;
5890 5896 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5891 5897 cmd->cmd_flags |= CFLAG_RETRY;
5892 5898
5893 5899 (void) mptsas_accept_pkt(mpt, cmd);
5894 5900
5895 5901 /*
5896 5902 * when target gives queue full status with no commands
5897 5903 * outstanding (m_t_ncmds == 0), throttle is set to 0
5898 5904 * (HOLD_THROTTLE), and the queue full handling start
5899 5905 * (see psarc/1994/313); if there are commands outstanding,
5900 5906 * throttle is set to (m_t_ncmds - 2)
5901 5907 */
5902 5908 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5903 5909 /*
5904 5910 * By setting throttle to QFULL_THROTTLE, we
5905 5911 * avoid submitting new commands and in
5906 5912 * mptsas_restart_cmd find out slots which need
5907 5913 * their throttles to be cleared.
5908 5914 */
5909 5915 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5910 5916 if (mpt->m_restart_cmd_timeid == 0) {
5911 5917 mpt->m_restart_cmd_timeid =
5912 5918 timeout(mptsas_restart_cmd, mpt,
5913 5919 ptgt->m_qfull_retry_interval);
5914 5920 }
5915 5921 }
5916 5922 }
5917 5923 }
5918 5924
5919 5925 mptsas_phymask_t
5920 5926 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5921 5927 {
5922 5928 mptsas_phymask_t phy_mask = 0;
5923 5929 uint8_t i = 0;
5924 5930
5925 5931 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5926 5932
5927 5933 ASSERT(mutex_owned(&mpt->m_mutex));
5928 5934
5929 5935 /*
5930 5936 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5931 5937 */
5932 5938 if (physport == 0xFF) {
5933 5939 return (0);
5934 5940 }
5935 5941
5936 5942 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5937 5943 if (mpt->m_phy_info[i].attached_devhdl &&
5938 5944 (mpt->m_phy_info[i].phy_mask != 0) &&
5939 5945 (mpt->m_phy_info[i].port_num == physport)) {
5940 5946 phy_mask = mpt->m_phy_info[i].phy_mask;
5941 5947 break;
5942 5948 }
5943 5949 }
5944 5950 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5945 5951 mpt->m_instance, physport, phy_mask));
5946 5952 return (phy_mask);
5947 5953 }
5948 5954
5949 5955 /*
5950 5956 * mpt free device handle after device gone, by use of passthrough
5951 5957 */
5952 5958 static int
5953 5959 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5954 5960 {
5955 5961 Mpi2SasIoUnitControlRequest_t req;
5956 5962 Mpi2SasIoUnitControlReply_t rep;
5957 5963 int ret;
5958 5964
5959 5965 ASSERT(mutex_owned(&mpt->m_mutex));
5960 5966
5961 5967 /*
5962 5968 * Need to compose a SAS IO Unit Control request message
5963 5969 * and call mptsas_do_passthru() function
5964 5970 */
5965 5971 bzero(&req, sizeof (req));
5966 5972 bzero(&rep, sizeof (rep));
5967 5973
5968 5974 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5969 5975 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5970 5976 req.DevHandle = LE_16(devhdl);
5971 5977
5972 5978 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5973 5979 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5974 5980 if (ret != 0) {
5975 5981 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5976 5982 "Control error %d", ret);
5977 5983 return (DDI_FAILURE);
5978 5984 }
5979 5985
5980 5986 /* do passthrough success, check the ioc status */
5981 5987 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5982 5988 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5983 5989 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5984 5990 return (DDI_FAILURE);
5985 5991 }
5986 5992
5987 5993 return (DDI_SUCCESS);
5988 5994 }
5989 5995
5990 5996 static void
5991 5997 mptsas_update_phymask(mptsas_t *mpt)
5992 5998 {
5993 5999 mptsas_phymask_t mask = 0, phy_mask;
5994 6000 char *phy_mask_name;
5995 6001 uint8_t current_port;
5996 6002 int i, j;
5997 6003
5998 6004 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5999 6005
6000 6006 ASSERT(mutex_owned(&mpt->m_mutex));
6001 6007
6002 6008 (void) mptsas_get_sas_io_unit_page(mpt);
6003 6009
6004 6010 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6005 6011
6006 6012 for (i = 0; i < mpt->m_num_phys; i++) {
6007 6013 phy_mask = 0x00;
6008 6014
6009 6015 if (mpt->m_phy_info[i].attached_devhdl == 0)
6010 6016 continue;
6011 6017
6012 6018 bzero(phy_mask_name, sizeof (phy_mask_name));
6013 6019
6014 6020 current_port = mpt->m_phy_info[i].port_num;
6015 6021
6016 6022 if ((mask & (1 << i)) != 0)
6017 6023 continue;
6018 6024
6019 6025 for (j = 0; j < mpt->m_num_phys; j++) {
6020 6026 if (mpt->m_phy_info[j].attached_devhdl &&
6021 6027 (mpt->m_phy_info[j].port_num == current_port)) {
6022 6028 phy_mask |= (1 << j);
6023 6029 }
6024 6030 }
6025 6031 mask = mask | phy_mask;
6026 6032
6027 6033 for (j = 0; j < mpt->m_num_phys; j++) {
6028 6034 if ((phy_mask >> j) & 0x01) {
6029 6035 mpt->m_phy_info[j].phy_mask = phy_mask;
6030 6036 }
6031 6037 }
6032 6038
6033 6039 (void) sprintf(phy_mask_name, "%x", phy_mask);
6034 6040
6035 6041 mutex_exit(&mpt->m_mutex);
6036 6042 /*
6037 6043 * register a iport, if the port has already been existed
6038 6044 * SCSA will do nothing and just return.
6039 6045 */
6040 6046 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
6041 6047 mutex_enter(&mpt->m_mutex);
6042 6048 }
6043 6049 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6044 6050 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
6045 6051 }
6046 6052
6047 6053 /*
6048 6054 * mptsas_handle_dr is a task handler for DR, the DR action includes:
6049 6055 * 1. Directly attched Device Added/Removed.
6050 6056 * 2. Expander Device Added/Removed.
6051 6057 * 3. Indirectly Attached Device Added/Expander.
6052 6058 * 4. LUNs of a existing device status change.
6053 6059 * 5. RAID volume created/deleted.
6054 6060 * 6. Member of RAID volume is released because of RAID deletion.
6055 6061 * 7. Physical disks are removed because of RAID creation.
6056 6062 */
6057 6063 static void
6058 6064 mptsas_handle_dr(void *args) {
6059 6065 mptsas_topo_change_list_t *topo_node = NULL;
6060 6066 mptsas_topo_change_list_t *save_node = NULL;
6061 6067 mptsas_t *mpt;
6062 6068 dev_info_t *parent = NULL;
6063 6069 mptsas_phymask_t phymask = 0;
6064 6070 char *phy_mask_name;
6065 6071 uint8_t flags = 0, physport = 0xff;
6066 6072 uint8_t port_update = 0;
6067 6073 uint_t event;
6068 6074
6069 6075 topo_node = (mptsas_topo_change_list_t *)args;
6070 6076
6071 6077 mpt = topo_node->mpt;
6072 6078 event = topo_node->event;
6073 6079 flags = topo_node->flags;
6074 6080
6075 6081 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6076 6082
6077 6083 NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));
6078 6084
6079 6085 switch (event) {
6080 6086 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6081 6087 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6082 6088 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
6083 6089 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6084 6090 /*
6085 6091 * Direct attached or expander attached device added
6086 6092 * into system or a Phys Disk that is being unhidden.
6087 6093 */
6088 6094 port_update = 1;
6089 6095 }
6090 6096 break;
6091 6097 case MPTSAS_DR_EVENT_RECONFIG_SMP:
6092 6098 /*
6093 6099 * New expander added into system, it must be the head
6094 6100 * of topo_change_list_t
6095 6101 */
6096 6102 port_update = 1;
6097 6103 break;
6098 6104 default:
6099 6105 port_update = 0;
6100 6106 break;
6101 6107 }
6102 6108 /*
6103 6109 * All cases port_update == 1 may cause initiator port form change
6104 6110 */
6105 6111 mutex_enter(&mpt->m_mutex);
6106 6112 if (mpt->m_port_chng && port_update) {
6107 6113 /*
6108 6114 * mpt->m_port_chng flag indicates some PHYs of initiator
6109 6115 * port have changed to online. So when expander added or
6110 6116 * directly attached device online event come, we force to
6111 6117 * update port information by issueing SAS IO Unit Page and
6112 6118 * update PHYMASKs.
6113 6119 */
6114 6120 (void) mptsas_update_phymask(mpt);
6115 6121 mpt->m_port_chng = 0;
6116 6122
6117 6123 }
6118 6124 mutex_exit(&mpt->m_mutex);
6119 6125 while (topo_node) {
6120 6126 phymask = 0;
6121 6127 if (parent == NULL) {
6122 6128 physport = topo_node->un.physport;
6123 6129 event = topo_node->event;
6124 6130 flags = topo_node->flags;
6125 6131 if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
6126 6132 MPTSAS_DR_EVENT_OFFLINE_SMP)) {
6127 6133 /*
6128 6134 * For all offline events, phymask is known
6129 6135 */
6130 6136 phymask = topo_node->un.phymask;
6131 6137 goto find_parent;
6132 6138 }
6133 6139 if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6134 6140 goto handle_topo_change;
6135 6141 }
6136 6142 if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
6137 6143 phymask = topo_node->un.phymask;
6138 6144 goto find_parent;
6139 6145 }
6140 6146
6141 6147 if ((flags ==
6142 6148 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
6143 6149 (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
6144 6150 /*
6145 6151 * There is no any field in IR_CONFIG_CHANGE
6146 6152 * event indicate physport/phynum, let's get
6147 6153 * parent after SAS Device Page0 request.
6148 6154 */
6149 6155 goto handle_topo_change;
6150 6156 }
6151 6157
6152 6158 mutex_enter(&mpt->m_mutex);
6153 6159 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6154 6160 /*
6155 6161 * If the direct attached device added or a
6156 6162 * phys disk is being unhidden, argument
6157 6163 * physport actually is PHY#, so we have to get
6158 6164 * phymask according PHY#.
6159 6165 */
6160 6166 physport = mpt->m_phy_info[physport].port_num;
6161 6167 }
6162 6168
6163 6169 /*
6164 6170 * Translate physport to phymask so that we can search
6165 6171 * parent dip.
6166 6172 */
6167 6173 phymask = mptsas_physport_to_phymask(mpt,
6168 6174 physport);
6169 6175 mutex_exit(&mpt->m_mutex);
6170 6176
6171 6177 find_parent:
6172 6178 bzero(phy_mask_name, MPTSAS_MAX_PHYS);
6173 6179 /*
6174 6180 * For RAID topology change node, write the iport name
6175 6181 * as v0.
6176 6182 */
6177 6183 if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6178 6184 (void) sprintf(phy_mask_name, "v0");
6179 6185 } else {
6180 6186 /*
6181 6187 * phymask can bo 0 if the drive has been
6182 6188 * pulled by the time an add event is
6183 6189 * processed. If phymask is 0, just skip this
6184 6190 * event and continue.
6185 6191 */
6186 6192 if (phymask == 0) {
6187 6193 mutex_enter(&mpt->m_mutex);
6188 6194 save_node = topo_node;
6189 6195 topo_node = topo_node->next;
6190 6196 ASSERT(save_node);
6191 6197 kmem_free(save_node,
6192 6198 sizeof (mptsas_topo_change_list_t));
6193 6199 mutex_exit(&mpt->m_mutex);
6194 6200
6195 6201 parent = NULL;
6196 6202 continue;
6197 6203 }
6198 6204 (void) sprintf(phy_mask_name, "%x", phymask);
6199 6205 }
6200 6206 parent = scsi_hba_iport_find(mpt->m_dip,
6201 6207 phy_mask_name);
6202 6208 if (parent == NULL) {
6203 6209 mptsas_log(mpt, CE_WARN, "Failed to find an "
6204 6210 "iport, should not happen!");
6205 6211 goto out;
6206 6212 }
6207 6213
6208 6214 }
6209 6215 ASSERT(parent);
6210 6216 handle_topo_change:
6211 6217
6212 6218 mutex_enter(&mpt->m_mutex);
6213 6219 /*
6214 6220 * If HBA is being reset, don't perform operations depending
6215 6221 * on the IOC. We must free the topo list, however.
6216 6222 */
6217 6223 if (!mpt->m_in_reset)
6218 6224 mptsas_handle_topo_change(topo_node, parent);
6219 6225 else
6220 6226 NDBG20(("skipping topo change received during reset"));
6221 6227 save_node = topo_node;
6222 6228 topo_node = topo_node->next;
6223 6229 ASSERT(save_node);
6224 6230 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
6225 6231 mutex_exit(&mpt->m_mutex);
6226 6232
6227 6233 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6228 6234 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
6229 6235 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
6230 6236 /*
6231 6237 * If direct attached device associated, make sure
6232 6238 * reset the parent before start the next one. But
6233 6239 * all devices associated with expander shares the
6234 6240 * parent. Also, reset parent if this is for RAID.
6235 6241 */
6236 6242 parent = NULL;
6237 6243 }
6238 6244 }
6239 6245 out:
6240 6246 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6241 6247 }
6242 6248
6243 6249 static void
6244 6250 mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
6245 6251 dev_info_t *parent)
6246 6252 {
6247 6253 mptsas_target_t *ptgt = NULL;
6248 6254 mptsas_smp_t *psmp = NULL;
6249 6255 mptsas_t *mpt = (void *)topo_node->mpt;
6250 6256 uint16_t devhdl;
6251 6257 uint16_t attached_devhdl;
6252 6258 uint64_t sas_wwn = 0;
6253 6259 int rval = 0;
6254 6260 uint32_t page_address;
6255 6261 uint8_t phy, flags;
6256 6262 char *addr = NULL;
6257 6263 dev_info_t *lundip;
6258 6264 int circ = 0, circ1 = 0;
6259 6265 char attached_wwnstr[MPTSAS_WWN_STRLEN];
6260 6266
6261 6267 NDBG20(("mptsas%d handle_topo_change enter, devhdl 0x%x,"
6262 6268 "event 0x%x, flags 0x%x", mpt->m_instance, topo_node->devhdl,
6263 6269 topo_node->event, topo_node->flags));
6264 6270
6265 6271 ASSERT(mutex_owned(&mpt->m_mutex));
6266 6272
6267 6273 switch (topo_node->event) {
6268 6274 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6269 6275 {
6270 6276 char *phy_mask_name;
6271 6277 mptsas_phymask_t phymask = 0;
6272 6278
6273 6279 if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6274 6280 /*
6275 6281 * Get latest RAID info.
6276 6282 */
6277 6283 (void) mptsas_get_raid_info(mpt);
6278 6284 ptgt = refhash_linear_search(mpt->m_targets,
6279 6285 mptsas_target_eval_devhdl, &topo_node->devhdl);
6280 6286 if (ptgt == NULL)
6281 6287 break;
6282 6288 } else {
6283 6289 ptgt = (void *)topo_node->object;
6284 6290 }
6285 6291
6286 6292 if (ptgt == NULL) {
6287 6293 /*
6288 6294 * If a Phys Disk was deleted, RAID info needs to be
6289 6295 * updated to reflect the new topology.
6290 6296 */
6291 6297 (void) mptsas_get_raid_info(mpt);
6292 6298
6293 6299 /*
6294 6300 * Get sas device page 0 by DevHandle to make sure if
6295 6301 * SSP/SATA end device exist.
6296 6302 */
6297 6303 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6298 6304 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
6299 6305 topo_node->devhdl;
6300 6306
6301 6307 rval = mptsas_get_target_device_info(mpt, page_address,
6302 6308 &devhdl, &ptgt);
6303 6309 if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
6304 6310 mptsas_log(mpt, CE_NOTE,
6305 6311 "mptsas_handle_topo_change: target %d is "
6306 6312 "not a SAS/SATA device. \n",
6307 6313 topo_node->devhdl);
6308 6314 } else if (rval == DEV_INFO_FAIL_ALLOC) {
6309 6315 mptsas_log(mpt, CE_NOTE,
6310 6316 "mptsas_handle_topo_change: could not "
6311 6317 "allocate memory. \n");
6312 6318 }
6313 6319 /*
6314 6320 * If rval is DEV_INFO_PHYS_DISK than there is nothing
6315 6321 * else to do, just leave.
6316 6322 */
6317 6323 if (rval != DEV_INFO_SUCCESS) {
6318 6324 return;
6319 6325 }
6320 6326 }
6321 6327
6322 6328 ASSERT(ptgt->m_devhdl == topo_node->devhdl);
6323 6329
6324 6330 mutex_exit(&mpt->m_mutex);
6325 6331 flags = topo_node->flags;
6326 6332
6327 6333 if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
6328 6334 phymask = ptgt->m_addr.mta_phymask;
6329 6335 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6330 6336 (void) sprintf(phy_mask_name, "%x", phymask);
6331 6337 parent = scsi_hba_iport_find(mpt->m_dip,
6332 6338 phy_mask_name);
6333 6339 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6334 6340 if (parent == NULL) {
6335 6341 mptsas_log(mpt, CE_WARN, "Failed to find a "
6336 6342 "iport for PD, should not happen!");
6337 6343 mutex_enter(&mpt->m_mutex);
6338 6344 break;
6339 6345 }
6340 6346 }
6341 6347
6342 6348 if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6343 6349 ndi_devi_enter(parent, &circ1);
6344 6350 (void) mptsas_config_raid(parent, topo_node->devhdl,
6345 6351 &lundip);
6346 6352 ndi_devi_exit(parent, circ1);
6347 6353 } else {
6348 6354 /*
6349 6355 * hold nexus for bus configure
6350 6356 */
6351 6357 ndi_devi_enter(scsi_vhci_dip, &circ);
6352 6358 ndi_devi_enter(parent, &circ1);
6353 6359 rval = mptsas_config_target(parent, ptgt);
6354 6360 /*
6355 6361 * release nexus for bus configure
6356 6362 */
6357 6363 ndi_devi_exit(parent, circ1);
6358 6364 ndi_devi_exit(scsi_vhci_dip, circ);
6359 6365
6360 6366 /*
6361 6367 * Add parent's props for SMHBA support
6362 6368 */
6363 6369 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6364 6370 bzero(attached_wwnstr,
6365 6371 sizeof (attached_wwnstr));
6366 6372 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
6367 6373 ptgt->m_addr.mta_wwn);
6368 6374 if (ddi_prop_update_string(DDI_DEV_T_NONE,
6369 6375 parent,
6370 6376 SCSI_ADDR_PROP_ATTACHED_PORT,
6371 6377 attached_wwnstr)
6372 6378 != DDI_PROP_SUCCESS) {
6373 6379 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6374 6380 parent,
6375 6381 SCSI_ADDR_PROP_ATTACHED_PORT);
6376 6382 mptsas_log(mpt, CE_WARN, "Failed to"
6377 6383 "attached-port props");
6378 6384 return;
6379 6385 }
6380 6386 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6381 6387 MPTSAS_NUM_PHYS, 1) !=
6382 6388 DDI_PROP_SUCCESS) {
6383 6389 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6384 6390 parent, MPTSAS_NUM_PHYS);
6385 6391 mptsas_log(mpt, CE_WARN, "Failed to"
6386 6392 " create num-phys props");
6387 6393 return;
6388 6394 }
6389 6395
6390 6396 /*
6391 6397 * Update PHY info for smhba
6392 6398 */
6393 6399 mutex_enter(&mpt->m_mutex);
6394 6400 if (mptsas_smhba_phy_init(mpt)) {
6395 6401 mutex_exit(&mpt->m_mutex);
6396 6402 mptsas_log(mpt, CE_WARN, "mptsas phy"
6397 6403 " update failed");
6398 6404 return;
6399 6405 }
6400 6406 mutex_exit(&mpt->m_mutex);
6401 6407
6402 6408 /*
6403 6409 * topo_node->un.physport is really the PHY#
6404 6410 * for direct attached devices
6405 6411 */
6406 6412 mptsas_smhba_set_one_phy_props(mpt, parent,
6407 6413 topo_node->un.physport, &attached_devhdl);
6408 6414
6409 6415 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6410 6416 MPTSAS_VIRTUAL_PORT, 0) !=
6411 6417 DDI_PROP_SUCCESS) {
6412 6418 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6413 6419 parent, MPTSAS_VIRTUAL_PORT);
6414 6420 mptsas_log(mpt, CE_WARN,
6415 6421 "mptsas virtual-port"
6416 6422 "port prop update failed");
6417 6423 return;
6418 6424 }
6419 6425 }
6420 6426 }
6421 6427 mutex_enter(&mpt->m_mutex);
6422 6428
6423 6429 NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
6424 6430 "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
6425 6431 ptgt->m_addr.mta_phymask));
6426 6432 break;
6427 6433 }
6428 6434 case MPTSAS_DR_EVENT_OFFLINE_TARGET:
6429 6435 {
6430 6436 devhdl = topo_node->devhdl;
6431 6437 ptgt = refhash_linear_search(mpt->m_targets,
6432 6438 mptsas_target_eval_devhdl, &devhdl);
6433 6439 if (ptgt == NULL)
6434 6440 break;
6435 6441
6436 6442 sas_wwn = ptgt->m_addr.mta_wwn;
6437 6443 phy = ptgt->m_phynum;
6438 6444
6439 6445 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
6440 6446
6441 6447 if (sas_wwn) {
6442 6448 (void) sprintf(addr, "w%016"PRIx64, sas_wwn);
6443 6449 } else {
6444 6450 (void) sprintf(addr, "p%x", phy);
6445 6451 }
6446 6452 ASSERT(ptgt->m_devhdl == devhdl);
6447 6453
6448 6454 if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
6449 6455 (topo_node->flags ==
6450 6456 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6451 6457 /*
6452 6458 * Get latest RAID info if RAID volume status changes
6453 6459 * or Phys Disk status changes
6454 6460 */
6455 6461 (void) mptsas_get_raid_info(mpt);
6456 6462 }
6457 6463 /*
6458 6464 * Abort all outstanding command on the device
6459 6465 */
6460 6466 rval = mptsas_do_scsi_reset(mpt, devhdl);
6461 6467 if (rval) {
6462 6468 NDBG20(("mptsas%d handle_topo_change to reset target "
6463 6469 "before offline devhdl:%x, phymask:%x, rval:%x",
6464 6470 mpt->m_instance, ptgt->m_devhdl,
6465 6471 ptgt->m_addr.mta_phymask, rval));
6466 6472 }
6467 6473
6468 6474 mutex_exit(&mpt->m_mutex);
6469 6475
6470 6476 ndi_devi_enter(scsi_vhci_dip, &circ);
6471 6477 ndi_devi_enter(parent, &circ1);
6472 6478 rval = mptsas_offline_target(parent, addr);
6473 6479 ndi_devi_exit(parent, circ1);
6474 6480 ndi_devi_exit(scsi_vhci_dip, circ);
6475 6481 NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
6476 6482 "phymask:%x, rval:%x", mpt->m_instance,
6477 6483 ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));
6478 6484
6479 6485 kmem_free(addr, SCSI_MAXNAMELEN);
6480 6486
6481 6487 /*
6482 6488 * Clear parent's props for SMHBA support
6483 6489 */
6484 6490 flags = topo_node->flags;
6485 6491 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6486 6492 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6487 6493 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6488 6494 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6489 6495 DDI_PROP_SUCCESS) {
6490 6496 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6491 6497 SCSI_ADDR_PROP_ATTACHED_PORT);
6492 6498 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6493 6499 "prop update failed");
6494 6500 break;
6495 6501 }
6496 6502 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6497 6503 MPTSAS_NUM_PHYS, 0) !=
6498 6504 DDI_PROP_SUCCESS) {
6499 6505 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6500 6506 MPTSAS_NUM_PHYS);
6501 6507 mptsas_log(mpt, CE_WARN, "mptsas num phys "
6502 6508 "prop update failed");
6503 6509 break;
6504 6510 }
6505 6511 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6506 6512 MPTSAS_VIRTUAL_PORT, 1) !=
6507 6513 DDI_PROP_SUCCESS) {
6508 6514 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6509 6515 MPTSAS_VIRTUAL_PORT);
6510 6516 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6511 6517 "prop update failed");
6512 6518 break;
6513 6519 }
6514 6520 }
6515 6521
6516 6522 mutex_enter(&mpt->m_mutex);
6517 6523 ptgt->m_led_status = 0;
6518 6524 (void) mptsas_flush_led_status(mpt, ptgt);
6519 6525 if (rval == DDI_SUCCESS) {
6520 6526 refhash_remove(mpt->m_targets, ptgt);
6521 6527 ptgt = NULL;
6522 6528 } else {
6523 6529 /*
6524 6530 * clean DR_INTRANSITION flag to allow I/O down to
6525 6531 * PHCI driver since failover finished.
6526 6532 * Invalidate the devhdl
6527 6533 */
6528 6534 ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6529 6535 ptgt->m_tgt_unconfigured = 0;
6530 6536 mutex_enter(&mpt->m_tx_waitq_mutex);
6531 6537 ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6532 6538 mutex_exit(&mpt->m_tx_waitq_mutex);
6533 6539 }
6534 6540
6535 6541 /*
6536 6542 * Send SAS IO Unit Control to free the dev handle
6537 6543 */
6538 6544 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6539 6545 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
6540 6546 rval = mptsas_free_devhdl(mpt, devhdl);
6541 6547
6542 6548 NDBG20(("mptsas%d handle_topo_change to remove "
6543 6549 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6544 6550 rval));
6545 6551 }
6546 6552
6547 6553 break;
6548 6554 }
6549 6555 case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
6550 6556 {
6551 6557 devhdl = topo_node->devhdl;
6552 6558 /*
6553 6559 * If this is the remove handle event, do a reset first.
6554 6560 */
6555 6561 if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6556 6562 rval = mptsas_do_scsi_reset(mpt, devhdl);
6557 6563 if (rval) {
6558 6564 NDBG20(("mpt%d reset target before remove "
6559 6565 "devhdl:%x, rval:%x", mpt->m_instance,
6560 6566 devhdl, rval));
6561 6567 }
6562 6568 }
6563 6569
6564 6570 /*
6565 6571 * Send SAS IO Unit Control to free the dev handle
6566 6572 */
6567 6573 rval = mptsas_free_devhdl(mpt, devhdl);
6568 6574 NDBG20(("mptsas%d handle_topo_change to remove "
6569 6575 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6570 6576 rval));
6571 6577 break;
6572 6578 }
6573 6579 case MPTSAS_DR_EVENT_RECONFIG_SMP:
6574 6580 {
6575 6581 mptsas_smp_t smp;
6576 6582 dev_info_t *smpdip;
6577 6583
6578 6584 devhdl = topo_node->devhdl;
6579 6585
6580 6586 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
6581 6587 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
6582 6588 rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
6583 6589 if (rval != DDI_SUCCESS) {
6584 6590 mptsas_log(mpt, CE_WARN, "failed to online smp, "
6585 6591 "handle %x", devhdl);
6586 6592 return;
6587 6593 }
6588 6594
6589 6595 psmp = mptsas_smp_alloc(mpt, &smp);
6590 6596 if (psmp == NULL) {
6591 6597 return;
6592 6598 }
6593 6599
6594 6600 mutex_exit(&mpt->m_mutex);
6595 6601 ndi_devi_enter(parent, &circ1);
6596 6602 (void) mptsas_online_smp(parent, psmp, &smpdip);
6597 6603 ndi_devi_exit(parent, circ1);
6598 6604
6599 6605 mutex_enter(&mpt->m_mutex);
6600 6606 break;
6601 6607 }
6602 6608 case MPTSAS_DR_EVENT_OFFLINE_SMP:
6603 6609 {
6604 6610 devhdl = topo_node->devhdl;
6605 6611 uint32_t dev_info;
6606 6612
6607 6613 psmp = refhash_linear_search(mpt->m_smp_targets,
6608 6614 mptsas_smp_eval_devhdl, &devhdl);
6609 6615 if (psmp == NULL)
6610 6616 break;
6611 6617 /*
6612 6618 * The mptsas_smp_t data is released only if the dip is offlined
6613 6619 * successfully.
6614 6620 */
6615 6621 mutex_exit(&mpt->m_mutex);
6616 6622
6617 6623 ndi_devi_enter(parent, &circ1);
6618 6624 rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
6619 6625 ndi_devi_exit(parent, circ1);
6620 6626
6621 6627 dev_info = psmp->m_deviceinfo;
6622 6628 if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
6623 6629 DEVINFO_DIRECT_ATTACHED) {
6624 6630 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6625 6631 MPTSAS_VIRTUAL_PORT, 1) !=
6626 6632 DDI_PROP_SUCCESS) {
6627 6633 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6628 6634 MPTSAS_VIRTUAL_PORT);
6629 6635 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6630 6636 "prop update failed");
6631 6637 return;
6632 6638 }
6633 6639 /*
6634 6640 * Check whether the smp connected to the iport,
6635 6641 */
6636 6642 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6637 6643 MPTSAS_NUM_PHYS, 0) !=
6638 6644 DDI_PROP_SUCCESS) {
6639 6645 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6640 6646 MPTSAS_NUM_PHYS);
6641 6647 mptsas_log(mpt, CE_WARN, "mptsas num phys"
6642 6648 "prop update failed");
6643 6649 return;
6644 6650 }
6645 6651 /*
6646 6652 * Clear parent's attached-port props
6647 6653 */
6648 6654 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6649 6655 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6650 6656 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6651 6657 DDI_PROP_SUCCESS) {
6652 6658 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6653 6659 SCSI_ADDR_PROP_ATTACHED_PORT);
6654 6660 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6655 6661 "prop update failed");
6656 6662 return;
6657 6663 }
6658 6664 }
6659 6665
6660 6666 mutex_enter(&mpt->m_mutex);
6661 6667 NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
6662 6668 "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
6663 6669 if (rval == DDI_SUCCESS) {
6664 6670 refhash_remove(mpt->m_smp_targets, psmp);
6665 6671 } else {
6666 6672 psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
6667 6673 }
6668 6674
6669 6675 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6670 6676
6671 6677 break;
6672 6678 }
6673 6679 default:
6674 6680 return;
6675 6681 }
6676 6682 }
6677 6683
6678 6684 /*
6679 6685 * Record the event if its type is enabled in mpt instance by ioctl.
6680 6686 */
6681 6687 static void
6682 6688 mptsas_record_event(void *args)
6683 6689 {
6684 6690 m_replyh_arg_t *replyh_arg;
6685 6691 pMpi2EventNotificationReply_t eventreply;
6686 6692 uint32_t event, rfm;
6687 6693 mptsas_t *mpt;
6688 6694 int i, j;
6689 6695 uint16_t event_data_len;
6690 6696 boolean_t sendAEN = FALSE;
6691 6697
6692 6698 replyh_arg = (m_replyh_arg_t *)args;
6693 6699 rfm = replyh_arg->rfm;
6694 6700 mpt = replyh_arg->mpt;
6695 6701
6696 6702 eventreply = (pMpi2EventNotificationReply_t)
6697 6703 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6698 6704 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6699 6705
6700 6706
6701 6707 /*
6702 6708 * Generate a system event to let anyone who cares know that a
6703 6709 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6704 6710 * event mask is set to.
6705 6711 */
6706 6712 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6707 6713 sendAEN = TRUE;
6708 6714 }
6709 6715
6710 6716 /*
6711 6717 * Record the event only if it is not masked. Determine which dword
6712 6718 * and bit of event mask to test.
6713 6719 */
6714 6720 i = (uint8_t)(event / 32);
6715 6721 j = (uint8_t)(event % 32);
6716 6722 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6717 6723 i = mpt->m_event_index;
6718 6724 mpt->m_events[i].Type = event;
6719 6725 mpt->m_events[i].Number = ++mpt->m_event_number;
6720 6726 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6721 6727 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6722 6728 &eventreply->EventDataLength);
6723 6729
6724 6730 if (event_data_len > 0) {
6725 6731 /*
6726 6732 * Limit data to size in m_event entry
6727 6733 */
6728 6734 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6729 6735 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6730 6736 }
6731 6737 for (j = 0; j < event_data_len; j++) {
6732 6738 mpt->m_events[i].Data[j] =
6733 6739 ddi_get32(mpt->m_acc_reply_frame_hdl,
6734 6740 &(eventreply->EventData[j]));
6735 6741 }
6736 6742
6737 6743 /*
6738 6744 * check for index wrap-around
6739 6745 */
6740 6746 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6741 6747 i = 0;
6742 6748 }
6743 6749 mpt->m_event_index = (uint8_t)i;
6744 6750
6745 6751 /*
6746 6752 * Set flag to send the event.
6747 6753 */
6748 6754 sendAEN = TRUE;
6749 6755 }
6750 6756 }
6751 6757
6752 6758 /*
6753 6759 * Generate a system event if flag is set to let anyone who cares know
6754 6760 * that an event has occurred.
6755 6761 */
6756 6762 if (sendAEN) {
6757 6763 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6758 6764 "SAS", NULL, NULL, DDI_NOSLEEP);
6759 6765 }
6760 6766 }
6761 6767
6762 6768 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6763 6769 /*
6764 6770 * handle sync events from ioc in interrupt
6765 6771 * return value:
6766 6772 * DDI_SUCCESS: The event is handled by this func
6767 6773 * DDI_FAILURE: Event is not handled
6768 6774 */
6769 6775 static int
6770 6776 mptsas_handle_event_sync(void *args)
6771 6777 {
6772 6778 m_replyh_arg_t *replyh_arg;
6773 6779 pMpi2EventNotificationReply_t eventreply;
6774 6780 uint32_t event, rfm;
6775 6781 mptsas_t *mpt;
6776 6782 uint_t iocstatus;
6777 6783
6778 6784 replyh_arg = (m_replyh_arg_t *)args;
6779 6785 rfm = replyh_arg->rfm;
6780 6786 mpt = replyh_arg->mpt;
6781 6787
6782 6788 ASSERT(mutex_owned(&mpt->m_mutex));
6783 6789
6784 6790 eventreply = (pMpi2EventNotificationReply_t)
6785 6791 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6786 6792 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6787 6793
6788 6794 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6789 6795 &eventreply->IOCStatus)) {
6790 6796 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6791 6797 mptsas_log(mpt, CE_WARN,
6792 6798 "!mptsas_handle_event_sync: event 0x%x, "
6793 6799 "IOCStatus=0x%x, "
6794 6800 "IOCLogInfo=0x%x", event, iocstatus,
6795 6801 ddi_get32(mpt->m_acc_reply_frame_hdl,
6796 6802 &eventreply->IOCLogInfo));
6797 6803 } else {
6798 6804 mptsas_log(mpt, CE_WARN,
6799 6805 "mptsas_handle_event_sync: event 0x%x, "
6800 6806 "IOCStatus=0x%x, "
6801 6807 "(IOCLogInfo=0x%x)", event, iocstatus,
6802 6808 ddi_get32(mpt->m_acc_reply_frame_hdl,
6803 6809 &eventreply->IOCLogInfo));
6804 6810 }
6805 6811 }
6806 6812
6807 6813 /*
6808 6814 * figure out what kind of event we got and handle accordingly
6809 6815 */
6810 6816 switch (event) {
6811 6817 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6812 6818 {
6813 6819 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
6814 6820 uint8_t num_entries, expstatus, phy;
6815 6821 uint8_t phystatus, physport, state, i;
6816 6822 uint8_t start_phy_num, link_rate;
6817 6823 uint16_t dev_handle, reason_code;
6818 6824 uint16_t enc_handle, expd_handle;
6819 6825 char string[80], curr[80], prev[80];
6820 6826 mptsas_topo_change_list_t *topo_head = NULL;
6821 6827 mptsas_topo_change_list_t *topo_tail = NULL;
6822 6828 mptsas_topo_change_list_t *topo_node = NULL;
6823 6829 mptsas_target_t *ptgt;
6824 6830 mptsas_smp_t *psmp;
6825 6831 uint8_t flags = 0, exp_flag;
6826 6832 smhba_info_t *pSmhba = NULL;
6827 6833
6828 6834 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6829 6835
6830 6836 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6831 6837 eventreply->EventData;
6832 6838
6833 6839 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6834 6840 &sas_topo_change_list->EnclosureHandle);
6835 6841 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6836 6842 &sas_topo_change_list->ExpanderDevHandle);
6837 6843 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6838 6844 &sas_topo_change_list->NumEntries);
6839 6845 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6840 6846 &sas_topo_change_list->StartPhyNum);
6841 6847 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6842 6848 &sas_topo_change_list->ExpStatus);
6843 6849 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6844 6850 &sas_topo_change_list->PhysicalPort);
6845 6851
6846 6852 string[0] = 0;
6847 6853 if (expd_handle) {
6848 6854 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6849 6855 switch (expstatus) {
6850 6856 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6851 6857 (void) sprintf(string, " added");
6852 6858 /*
6853 6859 * New expander device added
6854 6860 */
6855 6861 mpt->m_port_chng = 1;
6856 6862 topo_node = kmem_zalloc(
6857 6863 sizeof (mptsas_topo_change_list_t),
6858 6864 KM_SLEEP);
6859 6865 topo_node->mpt = mpt;
6860 6866 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6861 6867 topo_node->un.physport = physport;
6862 6868 topo_node->devhdl = expd_handle;
6863 6869 topo_node->flags = flags;
6864 6870 topo_node->object = NULL;
6865 6871 if (topo_head == NULL) {
6866 6872 topo_head = topo_tail = topo_node;
6867 6873 } else {
6868 6874 topo_tail->next = topo_node;
6869 6875 topo_tail = topo_node;
6870 6876 }
6871 6877 break;
6872 6878 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6873 6879 (void) sprintf(string, " not responding, "
6874 6880 "removed");
6875 6881 psmp = refhash_linear_search(mpt->m_smp_targets,
6876 6882 mptsas_smp_eval_devhdl, &expd_handle);
6877 6883 if (psmp == NULL)
6878 6884 break;
6879 6885
6880 6886 topo_node = kmem_zalloc(
6881 6887 sizeof (mptsas_topo_change_list_t),
6882 6888 KM_SLEEP);
6883 6889 topo_node->mpt = mpt;
6884 6890 topo_node->un.phymask =
6885 6891 psmp->m_addr.mta_phymask;
6886 6892 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6887 6893 topo_node->devhdl = expd_handle;
6888 6894 topo_node->flags = flags;
6889 6895 topo_node->object = NULL;
6890 6896 if (topo_head == NULL) {
6891 6897 topo_head = topo_tail = topo_node;
6892 6898 } else {
6893 6899 topo_tail->next = topo_node;
6894 6900 topo_tail = topo_node;
6895 6901 }
6896 6902 break;
6897 6903 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6898 6904 break;
6899 6905 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6900 6906 (void) sprintf(string, " not responding, "
6901 6907 "delaying removal");
6902 6908 break;
6903 6909 default:
6904 6910 break;
6905 6911 }
6906 6912 } else {
6907 6913 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6908 6914 }
6909 6915
6910 6916 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6911 6917 enc_handle, expd_handle, string));
6912 6918 for (i = 0; i < num_entries; i++) {
6913 6919 phy = i + start_phy_num;
6914 6920 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6915 6921 &sas_topo_change_list->PHY[i].PhyStatus);
6916 6922 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6917 6923 &sas_topo_change_list->PHY[i].AttachedDevHandle);
6918 6924 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
6919 6925 /*
6920 6926 * Filter out processing of Phy Vacant Status unless
6921 6927 * the reason code is "Not Responding". Process all
6922 6928 * other combinations of Phy Status and Reason Codes.
6923 6929 */
6924 6930 if ((phystatus &
6925 6931 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
6926 6932 (reason_code !=
6927 6933 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
6928 6934 continue;
6929 6935 }
6930 6936 curr[0] = 0;
6931 6937 prev[0] = 0;
6932 6938 string[0] = 0;
6933 6939 switch (reason_code) {
6934 6940 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6935 6941 {
6936 6942 NDBG20(("mptsas%d phy %d physical_port %d "
6937 6943 "dev_handle %d added", mpt->m_instance, phy,
6938 6944 physport, dev_handle));
6939 6945 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6940 6946 &sas_topo_change_list->PHY[i].LinkRate);
6941 6947 state = (link_rate &
6942 6948 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6943 6949 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6944 6950 switch (state) {
6945 6951 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6946 6952 (void) sprintf(curr, "is disabled");
6947 6953 break;
6948 6954 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6949 6955 (void) sprintf(curr, "is offline, "
6950 6956 "failed speed negotiation");
6951 6957 break;
6952 6958 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6953 6959 (void) sprintf(curr, "SATA OOB "
6954 6960 "complete");
6955 6961 break;
6956 6962 case SMP_RESET_IN_PROGRESS:
6957 6963 (void) sprintf(curr, "SMP reset in "
6958 6964 "progress");
6959 6965 break;
6960 6966 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6961 6967 (void) sprintf(curr, "is online at "
6962 6968 "1.5 Gbps");
6963 6969 break;
6964 6970 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6965 6971 (void) sprintf(curr, "is online at 3.0 "
6966 6972 "Gbps");
6967 6973 break;
6968 6974 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6969 6975 (void) sprintf(curr, "is online at 6.0 "
6970 6976 "Gbps");
6971 6977 break;
6972 6978 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
6973 6979 (void) sprintf(curr,
6974 6980 "is online at 12.0 Gbps");
6975 6981 break;
6976 6982 default:
6977 6983 (void) sprintf(curr, "state is "
6978 6984 "unknown");
6979 6985 break;
6980 6986 }
6981 6987 /*
6982 6988 * New target device added into the system.
6983 6989 * Set association flag according to if an
6984 6990 * expander is used or not.
6985 6991 */
6986 6992 exp_flag =
6987 6993 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6988 6994 if (flags ==
6989 6995 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6990 6996 flags = exp_flag;
6991 6997 }
6992 6998 topo_node = kmem_zalloc(
6993 6999 sizeof (mptsas_topo_change_list_t),
6994 7000 KM_SLEEP);
6995 7001 topo_node->mpt = mpt;
6996 7002 topo_node->event =
6997 7003 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6998 7004 if (expd_handle == 0) {
6999 7005 /*
7000 7006 * Per MPI 2, if expander dev handle
7001 7007 * is 0, it's a directly attached
7002 7008 * device. So driver use PHY to decide
7003 7009 * which iport is associated
7004 7010 */
7005 7011 physport = phy;
7006 7012 mpt->m_port_chng = 1;
7007 7013 }
7008 7014 topo_node->un.physport = physport;
7009 7015 topo_node->devhdl = dev_handle;
7010 7016 topo_node->flags = flags;
7011 7017 topo_node->object = NULL;
7012 7018 if (topo_head == NULL) {
7013 7019 topo_head = topo_tail = topo_node;
7014 7020 } else {
7015 7021 topo_tail->next = topo_node;
7016 7022 topo_tail = topo_node;
7017 7023 }
7018 7024 break;
7019 7025 }
7020 7026 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7021 7027 {
7022 7028 NDBG20(("mptsas%d phy %d physical_port %d "
7023 7029 "dev_handle %d removed", mpt->m_instance,
7024 7030 phy, physport, dev_handle));
7025 7031 /*
7026 7032 * Set association flag according to if an
7027 7033 * expander is used or not.
7028 7034 */
7029 7035 exp_flag =
7030 7036 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7031 7037 if (flags ==
7032 7038 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7033 7039 flags = exp_flag;
7034 7040 }
7035 7041 /*
7036 7042 * Target device is removed from the system
7037 7043 * Before the device is really offline from
7038 7044 * from system.
7039 7045 */
7040 7046 ptgt = refhash_linear_search(mpt->m_targets,
7041 7047 mptsas_target_eval_devhdl, &dev_handle);
7042 7048 /*
7043 7049 * If ptgt is NULL here, it means that the
7044 7050 * DevHandle is not in the hash table. This is
7045 7051 * reasonable sometimes. For example, if a
7046 7052 * disk was pulled, then added, then pulled
7047 7053 * again, the disk will not have been put into
7048 7054 * the hash table because the add event will
7049 7055 * have an invalid phymask. BUT, this does not
7050 7056 * mean that the DevHandle is invalid. The
7051 7057 * controller will still have a valid DevHandle
7052 7058 * that must be removed. To do this, use the
7053 7059 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
7054 7060 */
7055 7061 if (ptgt == NULL) {
7056 7062 topo_node = kmem_zalloc(
7057 7063 sizeof (mptsas_topo_change_list_t),
7058 7064 KM_SLEEP);
7059 7065 topo_node->mpt = mpt;
7060 7066 topo_node->un.phymask = 0;
7061 7067 topo_node->event =
7062 7068 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7063 7069 topo_node->devhdl = dev_handle;
7064 7070 topo_node->flags = flags;
7065 7071 topo_node->object = NULL;
7066 7072 if (topo_head == NULL) {
7067 7073 topo_head = topo_tail =
7068 7074 topo_node;
7069 7075 } else {
7070 7076 topo_tail->next = topo_node;
7071 7077 topo_tail = topo_node;
7072 7078 }
7073 7079 break;
7074 7080 }
7075 7081
7076 7082 /*
7077 7083 * Update DR flag immediately avoid I/O failure
7078 7084 * before failover finish. Pay attention to the
7079 7085 * mutex protect, we need grab m_tx_waitq_mutex
7080 7086 * during set m_dr_flag because we won't add
7081 7087 * the following command into waitq, instead,
7082 7088 * we need return TRAN_BUSY in the tran_start
7083 7089 * context.
7084 7090 */
7085 7091 mutex_enter(&mpt->m_tx_waitq_mutex);
7086 7092 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7087 7093 mutex_exit(&mpt->m_tx_waitq_mutex);
7088 7094
7089 7095 topo_node = kmem_zalloc(
7090 7096 sizeof (mptsas_topo_change_list_t),
7091 7097 KM_SLEEP);
7092 7098 topo_node->mpt = mpt;
7093 7099 topo_node->un.phymask =
7094 7100 ptgt->m_addr.mta_phymask;
7095 7101 topo_node->event =
7096 7102 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7097 7103 topo_node->devhdl = dev_handle;
7098 7104 topo_node->flags = flags;
7099 7105 topo_node->object = NULL;
7100 7106 if (topo_head == NULL) {
7101 7107 topo_head = topo_tail = topo_node;
7102 7108 } else {
7103 7109 topo_tail->next = topo_node;
7104 7110 topo_tail = topo_node;
7105 7111 }
7106 7112 break;
7107 7113 }
7108 7114 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7109 7115 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7110 7116 &sas_topo_change_list->PHY[i].LinkRate);
7111 7117 state = (link_rate &
7112 7118 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7113 7119 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7114 7120 pSmhba = &mpt->m_phy_info[i].smhba_info;
7115 7121 pSmhba->negotiated_link_rate = state;
7116 7122 switch (state) {
7117 7123 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7118 7124 (void) sprintf(curr, "is disabled");
7119 7125 mptsas_smhba_log_sysevent(mpt,
7120 7126 ESC_SAS_PHY_EVENT,
7121 7127 SAS_PHY_REMOVE,
7122 7128 &mpt->m_phy_info[i].smhba_info);
7123 7129 mpt->m_phy_info[i].smhba_info.
7124 7130 negotiated_link_rate
7125 7131 = 0x1;
7126 7132 break;
7127 7133 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7128 7134 (void) sprintf(curr, "is offline, "
7129 7135 "failed speed negotiation");
7130 7136 mptsas_smhba_log_sysevent(mpt,
7131 7137 ESC_SAS_PHY_EVENT,
7132 7138 SAS_PHY_OFFLINE,
7133 7139 &mpt->m_phy_info[i].smhba_info);
7134 7140 break;
7135 7141 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7136 7142 (void) sprintf(curr, "SATA OOB "
7137 7143 "complete");
7138 7144 break;
7139 7145 case SMP_RESET_IN_PROGRESS:
7140 7146 (void) sprintf(curr, "SMP reset in "
7141 7147 "progress");
7142 7148 break;
7143 7149 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7144 7150 (void) sprintf(curr, "is online at "
7145 7151 "1.5 Gbps");
7146 7152 if ((expd_handle == 0) &&
7147 7153 (enc_handle == 1)) {
7148 7154 mpt->m_port_chng = 1;
7149 7155 }
7150 7156 mptsas_smhba_log_sysevent(mpt,
7151 7157 ESC_SAS_PHY_EVENT,
7152 7158 SAS_PHY_ONLINE,
7153 7159 &mpt->m_phy_info[i].smhba_info);
7154 7160 break;
7155 7161 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7156 7162 (void) sprintf(curr, "is online at 3.0 "
7157 7163 "Gbps");
7158 7164 if ((expd_handle == 0) &&
7159 7165 (enc_handle == 1)) {
7160 7166 mpt->m_port_chng = 1;
7161 7167 }
7162 7168 mptsas_smhba_log_sysevent(mpt,
7163 7169 ESC_SAS_PHY_EVENT,
7164 7170 SAS_PHY_ONLINE,
7165 7171 &mpt->m_phy_info[i].smhba_info);
7166 7172 break;
7167 7173 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7168 7174 (void) sprintf(curr, "is online at "
7169 7175 "6.0 Gbps");
7170 7176 if ((expd_handle == 0) &&
7171 7177 (enc_handle == 1)) {
7172 7178 mpt->m_port_chng = 1;
7173 7179 }
7174 7180 mptsas_smhba_log_sysevent(mpt,
7175 7181 ESC_SAS_PHY_EVENT,
7176 7182 SAS_PHY_ONLINE,
7177 7183 &mpt->m_phy_info[i].smhba_info);
7178 7184 break;
7179 7185 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7180 7186 (void) sprintf(curr, "is online at "
7181 7187 "12.0 Gbps");
7182 7188 if ((expd_handle == 0) &&
7183 7189 (enc_handle == 1)) {
7184 7190 mpt->m_port_chng = 1;
7185 7191 }
7186 7192 mptsas_smhba_log_sysevent(mpt,
7187 7193 ESC_SAS_PHY_EVENT,
7188 7194 SAS_PHY_ONLINE,
7189 7195 &mpt->m_phy_info[i].smhba_info);
7190 7196 break;
7191 7197 default:
7192 7198 (void) sprintf(curr, "state is "
7193 7199 "unknown");
7194 7200 break;
7195 7201 }
7196 7202
7197 7203 state = (link_rate &
7198 7204 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
7199 7205 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
7200 7206 switch (state) {
7201 7207 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7202 7208 (void) sprintf(prev, ", was disabled");
7203 7209 break;
7204 7210 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7205 7211 (void) sprintf(prev, ", was offline, "
7206 7212 "failed speed negotiation");
7207 7213 break;
7208 7214 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7209 7215 (void) sprintf(prev, ", was SATA OOB "
7210 7216 "complete");
7211 7217 break;
7212 7218 case SMP_RESET_IN_PROGRESS:
7213 7219 (void) sprintf(prev, ", was SMP reset "
7214 7220 "in progress");
7215 7221 break;
7216 7222 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7217 7223 (void) sprintf(prev, ", was online at "
7218 7224 "1.5 Gbps");
7219 7225 break;
7220 7226 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7221 7227 (void) sprintf(prev, ", was online at "
7222 7228 "3.0 Gbps");
7223 7229 break;
7224 7230 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7225 7231 (void) sprintf(prev, ", was online at "
7226 7232 "6.0 Gbps");
7227 7233 break;
7228 7234 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7229 7235 (void) sprintf(prev, ", was online at "
7230 7236 "12.0 Gbps");
7231 7237 break;
7232 7238 default:
7233 7239 break;
7234 7240 }
7235 7241 (void) sprintf(&string[strlen(string)], "link "
7236 7242 "changed, ");
7237 7243 break;
7238 7244 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7239 7245 continue;
7240 7246 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7241 7247 (void) sprintf(&string[strlen(string)],
7242 7248 "target not responding, delaying "
7243 7249 "removal");
7244 7250 break;
7245 7251 }
7246 7252 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7247 7253 mpt->m_instance, phy, dev_handle, string, curr,
7248 7254 prev));
7249 7255 }
7250 7256 if (topo_head != NULL) {
7251 7257 /*
7252 7258 * Launch DR taskq to handle topology change
7253 7259 */
7254 7260 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7255 7261 mptsas_handle_dr, (void *)topo_head,
7256 7262 DDI_NOSLEEP)) != DDI_SUCCESS) {
7257 7263 while (topo_head != NULL) {
7258 7264 topo_node = topo_head;
7259 7265 topo_head = topo_head->next;
7260 7266 kmem_free(topo_node,
7261 7267 sizeof (mptsas_topo_change_list_t));
7262 7268 }
7263 7269 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7264 7270 "for handle SAS DR event failed. \n");
7265 7271 }
7266 7272 }
7267 7273 break;
7268 7274 }
7269 7275 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7270 7276 {
7271 7277 Mpi2EventDataIrConfigChangeList_t *irChangeList;
7272 7278 mptsas_topo_change_list_t *topo_head = NULL;
7273 7279 mptsas_topo_change_list_t *topo_tail = NULL;
7274 7280 mptsas_topo_change_list_t *topo_node = NULL;
7275 7281 mptsas_target_t *ptgt;
7276 7282 uint8_t num_entries, i, reason;
7277 7283 uint16_t volhandle, diskhandle;
7278 7284
7279 7285 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7280 7286 eventreply->EventData;
7281 7287 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7282 7288 &irChangeList->NumElements);
7283 7289
7284 7290 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7285 7291 mpt->m_instance));
7286 7292
7287 7293 for (i = 0; i < num_entries; i++) {
7288 7294 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7289 7295 &irChangeList->ConfigElement[i].ReasonCode);
7290 7296 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7291 7297 &irChangeList->ConfigElement[i].VolDevHandle);
7292 7298 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7293 7299 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7294 7300
7295 7301 switch (reason) {
7296 7302 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7297 7303 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7298 7304 {
7299 7305 NDBG20(("mptsas %d volume added\n",
7300 7306 mpt->m_instance));
7301 7307
7302 7308 topo_node = kmem_zalloc(
7303 7309 sizeof (mptsas_topo_change_list_t),
7304 7310 KM_SLEEP);
7305 7311
7306 7312 topo_node->mpt = mpt;
7307 7313 topo_node->event =
7308 7314 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7309 7315 topo_node->un.physport = 0xff;
7310 7316 topo_node->devhdl = volhandle;
7311 7317 topo_node->flags =
7312 7318 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7313 7319 topo_node->object = NULL;
7314 7320 if (topo_head == NULL) {
7315 7321 topo_head = topo_tail = topo_node;
7316 7322 } else {
7317 7323 topo_tail->next = topo_node;
7318 7324 topo_tail = topo_node;
7319 7325 }
7320 7326 break;
7321 7327 }
7322 7328 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7323 7329 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7324 7330 {
7325 7331 NDBG20(("mptsas %d volume deleted\n",
7326 7332 mpt->m_instance));
7327 7333 ptgt = refhash_linear_search(mpt->m_targets,
7328 7334 mptsas_target_eval_devhdl, &volhandle);
7329 7335 if (ptgt == NULL)
7330 7336 break;
7331 7337
7332 7338 /*
7333 7339 * Clear any flags related to volume
7334 7340 */
7335 7341 (void) mptsas_delete_volume(mpt, volhandle);
7336 7342
7337 7343 /*
7338 7344 * Update DR flag immediately avoid I/O failure
7339 7345 */
7340 7346 mutex_enter(&mpt->m_tx_waitq_mutex);
7341 7347 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7342 7348 mutex_exit(&mpt->m_tx_waitq_mutex);
7343 7349
7344 7350 topo_node = kmem_zalloc(
7345 7351 sizeof (mptsas_topo_change_list_t),
7346 7352 KM_SLEEP);
7347 7353 topo_node->mpt = mpt;
7348 7354 topo_node->un.phymask =
7349 7355 ptgt->m_addr.mta_phymask;
7350 7356 topo_node->event =
7351 7357 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7352 7358 topo_node->devhdl = volhandle;
7353 7359 topo_node->flags =
7354 7360 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7355 7361 topo_node->object = (void *)ptgt;
7356 7362 if (topo_head == NULL) {
7357 7363 topo_head = topo_tail = topo_node;
7358 7364 } else {
7359 7365 topo_tail->next = topo_node;
7360 7366 topo_tail = topo_node;
7361 7367 }
7362 7368 break;
7363 7369 }
7364 7370 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7365 7371 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7366 7372 {
7367 7373 ptgt = refhash_linear_search(mpt->m_targets,
7368 7374 mptsas_target_eval_devhdl, &diskhandle);
7369 7375 if (ptgt == NULL)
7370 7376 break;
7371 7377
7372 7378 /*
7373 7379 * Update DR flag immediately avoid I/O failure
7374 7380 */
7375 7381 mutex_enter(&mpt->m_tx_waitq_mutex);
7376 7382 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7377 7383 mutex_exit(&mpt->m_tx_waitq_mutex);
7378 7384
7379 7385 topo_node = kmem_zalloc(
7380 7386 sizeof (mptsas_topo_change_list_t),
7381 7387 KM_SLEEP);
7382 7388 topo_node->mpt = mpt;
7383 7389 topo_node->un.phymask =
7384 7390 ptgt->m_addr.mta_phymask;
7385 7391 topo_node->event =
7386 7392 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7387 7393 topo_node->devhdl = diskhandle;
7388 7394 topo_node->flags =
7389 7395 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7390 7396 topo_node->object = (void *)ptgt;
7391 7397 if (topo_head == NULL) {
7392 7398 topo_head = topo_tail = topo_node;
7393 7399 } else {
7394 7400 topo_tail->next = topo_node;
7395 7401 topo_tail = topo_node;
7396 7402 }
7397 7403 break;
7398 7404 }
7399 7405 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7400 7406 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7401 7407 {
7402 7408 /*
7403 7409 * The physical drive is released by a IR
7404 7410 * volume. But we cannot get the the physport
7405 7411 * or phynum from the event data, so we only
7406 7412 * can get the physport/phynum after SAS
7407 7413 * Device Page0 request for the devhdl.
7408 7414 */
7409 7415 topo_node = kmem_zalloc(
7410 7416 sizeof (mptsas_topo_change_list_t),
7411 7417 KM_SLEEP);
7412 7418 topo_node->mpt = mpt;
7413 7419 topo_node->un.phymask = 0;
7414 7420 topo_node->event =
7415 7421 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7416 7422 topo_node->devhdl = diskhandle;
7417 7423 topo_node->flags =
7418 7424 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7419 7425 topo_node->object = NULL;
7420 7426 mpt->m_port_chng = 1;
7421 7427 if (topo_head == NULL) {
7422 7428 topo_head = topo_tail = topo_node;
7423 7429 } else {
7424 7430 topo_tail->next = topo_node;
7425 7431 topo_tail = topo_node;
7426 7432 }
7427 7433 break;
7428 7434 }
7429 7435 default:
7430 7436 break;
7431 7437 }
7432 7438 }
7433 7439
7434 7440 if (topo_head != NULL) {
7435 7441 /*
7436 7442 * Launch DR taskq to handle topology change
7437 7443 */
7438 7444 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7439 7445 mptsas_handle_dr, (void *)topo_head,
7440 7446 DDI_NOSLEEP)) != DDI_SUCCESS) {
7441 7447 while (topo_head != NULL) {
7442 7448 topo_node = topo_head;
7443 7449 topo_head = topo_head->next;
7444 7450 kmem_free(topo_node,
7445 7451 sizeof (mptsas_topo_change_list_t));
7446 7452 }
7447 7453 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7448 7454 "for handle SAS DR event failed. \n");
7449 7455 }
7450 7456 }
7451 7457 break;
7452 7458 }
7453 7459 default:
7454 7460 return (DDI_FAILURE);
7455 7461 }
7456 7462
7457 7463 return (DDI_SUCCESS);
7458 7464 }
7459 7465
7460 7466 /*
7461 7467 * handle events from ioc
7462 7468 */
7463 7469 static void
7464 7470 mptsas_handle_event(void *args)
7465 7471 {
7466 7472 m_replyh_arg_t *replyh_arg;
7467 7473 pMpi2EventNotificationReply_t eventreply;
7468 7474 uint32_t event, iocloginfo, rfm;
7469 7475 uint32_t status;
7470 7476 uint8_t port;
7471 7477 mptsas_t *mpt;
7472 7478 uint_t iocstatus;
7473 7479
7474 7480 replyh_arg = (m_replyh_arg_t *)args;
7475 7481 rfm = replyh_arg->rfm;
7476 7482 mpt = replyh_arg->mpt;
7477 7483
7478 7484 mutex_enter(&mpt->m_mutex);
7479 7485 /*
7480 7486 * If HBA is being reset, drop incoming event.
7481 7487 */
7482 7488 if (mpt->m_in_reset) {
7483 7489 NDBG20(("dropping event received prior to reset"));
7484 7490 mutex_exit(&mpt->m_mutex);
7485 7491 return;
7486 7492 }
7487 7493
7488 7494 eventreply = (pMpi2EventNotificationReply_t)
7489 7495 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7490 7496 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7491 7497
7492 7498 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7493 7499 &eventreply->IOCStatus)) {
7494 7500 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7495 7501 mptsas_log(mpt, CE_WARN,
7496 7502 "!mptsas_handle_event: IOCStatus=0x%x, "
7497 7503 "IOCLogInfo=0x%x", iocstatus,
7498 7504 ddi_get32(mpt->m_acc_reply_frame_hdl,
7499 7505 &eventreply->IOCLogInfo));
7500 7506 } else {
7501 7507 mptsas_log(mpt, CE_WARN,
7502 7508 "mptsas_handle_event: IOCStatus=0x%x, "
7503 7509 "IOCLogInfo=0x%x", iocstatus,
7504 7510 ddi_get32(mpt->m_acc_reply_frame_hdl,
7505 7511 &eventreply->IOCLogInfo));
7506 7512 }
7507 7513 }
7508 7514
7509 7515 /*
7510 7516 * figure out what kind of event we got and handle accordingly
7511 7517 */
7512 7518 switch (event) {
7513 7519 case MPI2_EVENT_LOG_ENTRY_ADDED:
7514 7520 break;
7515 7521 case MPI2_EVENT_LOG_DATA:
7516 7522 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7517 7523 &eventreply->IOCLogInfo);
7518 7524 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7519 7525 iocloginfo));
7520 7526 break;
7521 7527 case MPI2_EVENT_STATE_CHANGE:
7522 7528 NDBG20(("mptsas%d state change.", mpt->m_instance));
7523 7529 break;
7524 7530 case MPI2_EVENT_HARD_RESET_RECEIVED:
7525 7531 NDBG20(("mptsas%d event change.", mpt->m_instance));
7526 7532 break;
7527 7533 case MPI2_EVENT_SAS_DISCOVERY:
7528 7534 {
7529 7535 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7530 7536 char string[80];
7531 7537 uint8_t rc;
7532 7538
7533 7539 sasdiscovery =
7534 7540 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7535 7541
7536 7542 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7537 7543 &sasdiscovery->ReasonCode);
7538 7544 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7539 7545 &sasdiscovery->PhysicalPort);
7540 7546 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7541 7547 &sasdiscovery->DiscoveryStatus);
7542 7548
7543 7549 string[0] = 0;
7544 7550 switch (rc) {
7545 7551 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7546 7552 (void) sprintf(string, "STARTING");
7547 7553 break;
7548 7554 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7549 7555 (void) sprintf(string, "COMPLETED");
7550 7556 break;
7551 7557 default:
7552 7558 (void) sprintf(string, "UNKNOWN");
7553 7559 break;
7554 7560 }
7555 7561
7556 7562 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7557 7563 port, status));
7558 7564
7559 7565 break;
7560 7566 }
7561 7567 case MPI2_EVENT_EVENT_CHANGE:
7562 7568 NDBG20(("mptsas%d event change.", mpt->m_instance));
7563 7569 break;
7564 7570 case MPI2_EVENT_TASK_SET_FULL:
7565 7571 {
7566 7572 pMpi2EventDataTaskSetFull_t taskfull;
7567 7573
7568 7574 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7569 7575
7570 7576 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7571 7577 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7572 7578 &taskfull->CurrentDepth)));
7573 7579 break;
7574 7580 }
7575 7581 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7576 7582 {
7577 7583 /*
7578 7584 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7579 7585 * in mptsas_handle_event_sync() of interrupt context
7580 7586 */
7581 7587 break;
7582 7588 }
7583 7589 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7584 7590 {
7585 7591 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7586 7592 uint8_t rc;
7587 7593 char string[80];
7588 7594
7589 7595 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7590 7596 eventreply->EventData;
7591 7597
7592 7598 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7593 7599 &encstatus->ReasonCode);
7594 7600 switch (rc) {
7595 7601 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7596 7602 (void) sprintf(string, "added");
7597 7603 break;
7598 7604 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7599 7605 (void) sprintf(string, ", not responding");
7600 7606 break;
7601 7607 default:
7602 7608 break;
7603 7609 }
7604 7610 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure "
7605 7611 "%x%s\n", mpt->m_instance,
7606 7612 ddi_get16(mpt->m_acc_reply_frame_hdl,
7607 7613 &encstatus->EnclosureHandle), string));
7608 7614 break;
7609 7615 }
7610 7616
7611 7617 /*
7612 7618 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7613 7619 * mptsas_handle_event_sync,in here just send ack message.
7614 7620 */
7615 7621 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7616 7622 {
7617 7623 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7618 7624 uint8_t rc;
7619 7625 uint16_t devhdl;
7620 7626 uint64_t wwn = 0;
7621 7627 uint32_t wwn_lo, wwn_hi;
7622 7628
7623 7629 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7624 7630 eventreply->EventData;
7625 7631 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7626 7632 &statuschange->ReasonCode);
7627 7633 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7628 7634 (uint32_t *)(void *)&statuschange->SASAddress);
7629 7635 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7630 7636 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7631 7637 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7632 7638 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7633 7639 &statuschange->DevHandle);
7634 7640
7635 7641 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7636 7642 wwn));
7637 7643
7638 7644 switch (rc) {
7639 7645 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7640 7646 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7641 7647 ddi_get8(mpt->m_acc_reply_frame_hdl,
7642 7648 &statuschange->ASC),
7643 7649 ddi_get8(mpt->m_acc_reply_frame_hdl,
7644 7650 &statuschange->ASCQ)));
7645 7651 break;
7646 7652
7647 7653 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7648 7654 NDBG20(("Device not supported"));
7649 7655 break;
7650 7656
7651 7657 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7652 7658 NDBG20(("IOC internally generated the Target Reset "
7653 7659 "for devhdl:%x", devhdl));
7654 7660 break;
7655 7661
7656 7662 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7657 7663 NDBG20(("IOC's internally generated Target Reset "
7658 7664 "completed for devhdl:%x", devhdl));
7659 7665 break;
7660 7666
7661 7667 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7662 7668 NDBG20(("IOC internally generated Abort Task"));
7663 7669 break;
7664 7670
7665 7671 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7666 7672 NDBG20(("IOC's internally generated Abort Task "
7667 7673 "completed"));
7668 7674 break;
7669 7675
7670 7676 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7671 7677 NDBG20(("IOC internally generated Abort Task Set"));
7672 7678 break;
7673 7679
7674 7680 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7675 7681 NDBG20(("IOC internally generated Clear Task Set"));
7676 7682 break;
7677 7683
7678 7684 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7679 7685 NDBG20(("IOC internally generated Query Task"));
7680 7686 break;
7681 7687
7682 7688 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7683 7689 NDBG20(("Device sent an Asynchronous Notification"));
7684 7690 break;
7685 7691
7686 7692 default:
7687 7693 break;
7688 7694 }
7689 7695 break;
7690 7696 }
7691 7697 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7692 7698 {
7693 7699 /*
7694 7700 * IR TOPOLOGY CHANGE LIST Event has already been handled
7695 7701 * in mpt_handle_event_sync() of interrupt context
7696 7702 */
7697 7703 break;
7698 7704 }
7699 7705 case MPI2_EVENT_IR_OPERATION_STATUS:
7700 7706 {
7701 7707 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7702 7708 char reason_str[80];
7703 7709 uint8_t rc, percent;
7704 7710 uint16_t handle;
7705 7711
7706 7712 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7707 7713 eventreply->EventData;
7708 7714 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7709 7715 &irOpStatus->RAIDOperation);
7710 7716 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7711 7717 &irOpStatus->PercentComplete);
7712 7718 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7713 7719 &irOpStatus->VolDevHandle);
7714 7720
7715 7721 switch (rc) {
7716 7722 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7717 7723 (void) sprintf(reason_str, "resync");
7718 7724 break;
7719 7725 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7720 7726 (void) sprintf(reason_str, "online capacity "
7721 7727 "expansion");
7722 7728 break;
7723 7729 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7724 7730 (void) sprintf(reason_str, "consistency check");
7725 7731 break;
7726 7732 default:
7727 7733 (void) sprintf(reason_str, "unknown reason %x",
7728 7734 rc);
7729 7735 }
7730 7736
7731 7737 NDBG20(("mptsas%d raid operational status: (%s)"
7732 7738 "\thandle(0x%04x), percent complete(%d)\n",
7733 7739 mpt->m_instance, reason_str, handle, percent));
7734 7740 break;
7735 7741 }
7736 7742 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7737 7743 {
7738 7744 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7739 7745 uint8_t phy_num;
7740 7746 uint8_t primitive;
7741 7747
7742 7748 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7743 7749 eventreply->EventData;
7744 7750
7745 7751 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7746 7752 &sas_broadcast->PhyNum);
7747 7753 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7748 7754 &sas_broadcast->Primitive);
7749 7755
7750 7756 switch (primitive) {
7751 7757 case MPI2_EVENT_PRIMITIVE_CHANGE:
7752 7758 mptsas_smhba_log_sysevent(mpt,
7753 7759 ESC_SAS_HBA_PORT_BROADCAST,
7754 7760 SAS_PORT_BROADCAST_CHANGE,
7755 7761 &mpt->m_phy_info[phy_num].smhba_info);
7756 7762 break;
7757 7763 case MPI2_EVENT_PRIMITIVE_SES:
7758 7764 mptsas_smhba_log_sysevent(mpt,
7759 7765 ESC_SAS_HBA_PORT_BROADCAST,
7760 7766 SAS_PORT_BROADCAST_SES,
7761 7767 &mpt->m_phy_info[phy_num].smhba_info);
7762 7768 break;
7763 7769 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7764 7770 mptsas_smhba_log_sysevent(mpt,
7765 7771 ESC_SAS_HBA_PORT_BROADCAST,
7766 7772 SAS_PORT_BROADCAST_D01_4,
7767 7773 &mpt->m_phy_info[phy_num].smhba_info);
7768 7774 break;
7769 7775 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7770 7776 mptsas_smhba_log_sysevent(mpt,
7771 7777 ESC_SAS_HBA_PORT_BROADCAST,
7772 7778 SAS_PORT_BROADCAST_D04_7,
7773 7779 &mpt->m_phy_info[phy_num].smhba_info);
7774 7780 break;
7775 7781 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7776 7782 mptsas_smhba_log_sysevent(mpt,
7777 7783 ESC_SAS_HBA_PORT_BROADCAST,
7778 7784 SAS_PORT_BROADCAST_D16_7,
7779 7785 &mpt->m_phy_info[phy_num].smhba_info);
7780 7786 break;
7781 7787 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7782 7788 mptsas_smhba_log_sysevent(mpt,
7783 7789 ESC_SAS_HBA_PORT_BROADCAST,
7784 7790 SAS_PORT_BROADCAST_D29_7,
7785 7791 &mpt->m_phy_info[phy_num].smhba_info);
7786 7792 break;
7787 7793 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7788 7794 mptsas_smhba_log_sysevent(mpt,
7789 7795 ESC_SAS_HBA_PORT_BROADCAST,
7790 7796 SAS_PORT_BROADCAST_D24_0,
7791 7797 &mpt->m_phy_info[phy_num].smhba_info);
7792 7798 break;
7793 7799 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7794 7800 mptsas_smhba_log_sysevent(mpt,
7795 7801 ESC_SAS_HBA_PORT_BROADCAST,
7796 7802 SAS_PORT_BROADCAST_D27_4,
7797 7803 &mpt->m_phy_info[phy_num].smhba_info);
7798 7804 break;
7799 7805 default:
7800 7806 NDBG16(("mptsas%d: unknown BROADCAST PRIMITIVE"
7801 7807 " %x received",
7802 7808 mpt->m_instance, primitive));
7803 7809 break;
7804 7810 }
7805 7811 NDBG16(("mptsas%d sas broadcast primitive: "
7806 7812 "\tprimitive(0x%04x), phy(%d) complete\n",
7807 7813 mpt->m_instance, primitive, phy_num));
7808 7814 break;
7809 7815 }
7810 7816 case MPI2_EVENT_IR_VOLUME:
7811 7817 {
7812 7818 Mpi2EventDataIrVolume_t *irVolume;
7813 7819 uint16_t devhandle;
7814 7820 uint32_t state;
7815 7821 int config, vol;
7816 7822 uint8_t found = FALSE;
7817 7823
7818 7824 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7819 7825 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7820 7826 &irVolume->NewValue);
7821 7827 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7822 7828 &irVolume->VolDevHandle);
7823 7829
7824 7830 NDBG20(("EVENT_IR_VOLUME event is received"));
7825 7831
7826 7832 /*
7827 7833 * Get latest RAID info and then find the DevHandle for this
7828 7834 * event in the configuration. If the DevHandle is not found
7829 7835 * just exit the event.
7830 7836 */
7831 7837 (void) mptsas_get_raid_info(mpt);
7832 7838 for (config = 0; (config < mpt->m_num_raid_configs) &&
7833 7839 (!found); config++) {
7834 7840 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7835 7841 if (mpt->m_raidconfig[config].m_raidvol[vol].
7836 7842 m_raidhandle == devhandle) {
7837 7843 found = TRUE;
7838 7844 break;
7839 7845 }
7840 7846 }
7841 7847 }
7842 7848 if (!found) {
7843 7849 break;
7844 7850 }
7845 7851
7846 7852 switch (irVolume->ReasonCode) {
7847 7853 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7848 7854 {
7849 7855 uint32_t i;
7850 7856 mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
7851 7857 state;
7852 7858
7853 7859 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7854 7860 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7855 7861 ", auto-config of hot-swap drives is %s"
7856 7862 ", write caching is %s"
7857 7863 ", hot-spare pool mask is %02x\n",
7858 7864 vol, state &
7859 7865 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7860 7866 ? "disabled" : "enabled",
7861 7867 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7862 7868 ? "controlled by member disks" :
7863 7869 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7864 7870 ? "disabled" :
7865 7871 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7866 7872 ? "enabled" :
7867 7873 "incorrectly set",
7868 7874 (state >> 16) & 0xff);
7869 7875 break;
7870 7876 }
7871 7877 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7872 7878 {
7873 7879 mpt->m_raidconfig[config].m_raidvol[vol].m_state =
7874 7880 (uint8_t)state;
7875 7881
7876 7882 mptsas_log(mpt, CE_NOTE,
7877 7883 "Volume %d is now %s\n", vol,
7878 7884 state == MPI2_RAID_VOL_STATE_OPTIMAL
7879 7885 ? "optimal" :
7880 7886 state == MPI2_RAID_VOL_STATE_DEGRADED
7881 7887 ? "degraded" :
7882 7888 state == MPI2_RAID_VOL_STATE_ONLINE
7883 7889 ? "online" :
7884 7890 state == MPI2_RAID_VOL_STATE_INITIALIZING
7885 7891 ? "initializing" :
7886 7892 state == MPI2_RAID_VOL_STATE_FAILED
7887 7893 ? "failed" :
7888 7894 state == MPI2_RAID_VOL_STATE_MISSING
7889 7895 ? "missing" :
7890 7896 "state unknown");
7891 7897 break;
7892 7898 }
7893 7899 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7894 7900 {
7895 7901 mpt->m_raidconfig[config].m_raidvol[vol].
7896 7902 m_statusflags = state;
7897 7903
7898 7904 mptsas_log(mpt, CE_NOTE,
7899 7905 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7900 7906 vol,
7901 7907 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7902 7908 ? ", enabled" : ", disabled",
7903 7909 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7904 7910 ? ", quiesced" : "",
7905 7911 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7906 7912 ? ", inactive" : ", active",
7907 7913 state &
7908 7914 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7909 7915 ? ", bad block table is full" : "",
7910 7916 state &
7911 7917 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7912 7918 ? ", resync in progress" : "",
7913 7919 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7914 7920 ? ", background initialization in progress" : "",
7915 7921 state &
7916 7922 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7917 7923 ? ", capacity expansion in progress" : "",
7918 7924 state &
7919 7925 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7920 7926 ? ", consistency check in progress" : "",
7921 7927 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7922 7928 ? ", data scrub in progress" : "");
7923 7929 break;
7924 7930 }
7925 7931 default:
7926 7932 break;
7927 7933 }
7928 7934 break;
7929 7935 }
7930 7936 case MPI2_EVENT_IR_PHYSICAL_DISK:
7931 7937 {
7932 7938 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7933 7939 uint16_t devhandle, enchandle, slot;
7934 7940 uint32_t status, state;
7935 7941 uint8_t physdisknum, reason;
7936 7942
7937 7943 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7938 7944 eventreply->EventData;
7939 7945 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7940 7946 &irPhysDisk->PhysDiskNum);
7941 7947 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7942 7948 &irPhysDisk->PhysDiskDevHandle);
7943 7949 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7944 7950 &irPhysDisk->EnclosureHandle);
7945 7951 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7946 7952 &irPhysDisk->Slot);
7947 7953 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7948 7954 &irPhysDisk->NewValue);
7949 7955 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7950 7956 &irPhysDisk->ReasonCode);
7951 7957
7952 7958 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7953 7959
7954 7960 switch (reason) {
7955 7961 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7956 7962 mptsas_log(mpt, CE_NOTE,
7957 7963 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7958 7964 "for enclosure with handle 0x%x is now in hot "
7959 7965 "spare pool %d",
7960 7966 physdisknum, devhandle, slot, enchandle,
7961 7967 (state >> 16) & 0xff);
7962 7968 break;
7963 7969
7964 7970 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7965 7971 status = state;
7966 7972 mptsas_log(mpt, CE_NOTE,
7967 7973 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7968 7974 "for enclosure with handle 0x%x is now "
7969 7975 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7970 7976 enchandle,
7971 7977 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7972 7978 ? ", inactive" : ", active",
7973 7979 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7974 7980 ? ", out of sync" : "",
7975 7981 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7976 7982 ? ", quiesced" : "",
7977 7983 status &
7978 7984 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7979 7985 ? ", write cache enabled" : "",
7980 7986 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7981 7987 ? ", capacity expansion target" : "");
7982 7988 break;
7983 7989
7984 7990 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7985 7991 mptsas_log(mpt, CE_NOTE,
7986 7992 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7987 7993 "for enclosure with handle 0x%x is now %s\n",
7988 7994 physdisknum, devhandle, slot, enchandle,
7989 7995 state == MPI2_RAID_PD_STATE_OPTIMAL
7990 7996 ? "optimal" :
7991 7997 state == MPI2_RAID_PD_STATE_REBUILDING
7992 7998 ? "rebuilding" :
7993 7999 state == MPI2_RAID_PD_STATE_DEGRADED
7994 8000 ? "degraded" :
7995 8001 state == MPI2_RAID_PD_STATE_HOT_SPARE
7996 8002 ? "a hot spare" :
7997 8003 state == MPI2_RAID_PD_STATE_ONLINE
7998 8004 ? "online" :
7999 8005 state == MPI2_RAID_PD_STATE_OFFLINE
8000 8006 ? "offline" :
8001 8007 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
8002 8008 ? "not compatible" :
8003 8009 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
8004 8010 ? "not configured" :
8005 8011 "state unknown");
8006 8012 break;
8007 8013 }
8008 8014 break;
8009 8015 }
8010 8016 default:
8011 8017 NDBG20(("mptsas%d: unknown event %x received",
8012 8018 mpt->m_instance, event));
8013 8019 break;
8014 8020 }
8015 8021
8016 8022 /*
8017 8023 * Return the reply frame to the free queue.
8018 8024 */
8019 8025 ddi_put32(mpt->m_acc_free_queue_hdl,
8020 8026 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
8021 8027 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
8022 8028 DDI_DMA_SYNC_FORDEV);
8023 8029 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
8024 8030 mpt->m_free_index = 0;
8025 8031 }
8026 8032 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
8027 8033 mpt->m_free_index);
8028 8034 mutex_exit(&mpt->m_mutex);
8029 8035 }
8030 8036
8031 8037 /*
8032 8038 * invoked from timeout() to restart qfull cmds with throttle == 0
8033 8039 */
8034 8040 static void
8035 8041 mptsas_restart_cmd(void *arg)
8036 8042 {
8037 8043 mptsas_t *mpt = arg;
8038 8044 mptsas_target_t *ptgt = NULL;
8039 8045
8040 8046 mutex_enter(&mpt->m_mutex);
8041 8047
8042 8048 mpt->m_restart_cmd_timeid = 0;
8043 8049
8044 8050 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8045 8051 ptgt = refhash_next(mpt->m_targets, ptgt)) {
8046 8052 if (ptgt->m_reset_delay == 0) {
8047 8053 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
8048 8054 mptsas_set_throttle(mpt, ptgt,
8049 8055 MAX_THROTTLE);
8050 8056 }
8051 8057 }
8052 8058 }
8053 8059 mptsas_restart_hba(mpt);
8054 8060 mutex_exit(&mpt->m_mutex);
8055 8061 }
8056 8062
/*
 * mptsas_remove_cmd - detach a completed command from the active slot
 * bookkeeping.
 *
 * Clears the command's entry in the active-slot array, decrements the
 * controller and (for non-IOC commands) per-target outstanding counts,
 * unlinks the command from the target's active/timeout queue, and for
 * IOC-internal commands returns the command structure to the pool.
 * TM commands are untouched; their own completion routines handle them.
 * Caller is assumed to hold m_mutex (the slot array and counters it
 * touches are m_mutex-protected elsewhere in this file) -- NOTE(review):
 * confirm against callers.
 */
void
mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
    int slot;
    mptsas_slots_t *slots = mpt->m_active;
    mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

    ASSERT(cmd != NULL);
    ASSERT(cmd->cmd_queued == FALSE);

    /*
     * Task Management cmds are removed in their own routines. Also,
     * we don't want to modify timeout based on TM cmds.
     */
    if (cmd->cmd_flags & CFLAG_TM_CMD) {
        return;
    }

    slot = cmd->cmd_slot;

    /*
     * remove the cmd.
     */
    if (cmd == slots->m_slot[slot]) {
        NDBG31(("mptsas_remove_cmd: removing cmd=0x%p, flags "
            "0x%x", (void *)cmd, cmd->cmd_flags));
        slots->m_slot[slot] = NULL;
        mpt->m_ncmds--;

        /*
         * only decrement per target ncmds if command
         * has a target associated with it.
         */
        if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
            ptgt->m_t_ncmds--;
            /*
             * reset throttle if we just ran an untagged command
             * to a tagged target
             */
            if ((ptgt->m_t_ncmds == 0) &&
                ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
                mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
            }

            /*
             * Remove this command from the active queue.
             * A zero cmd_active_expiration means the command is
             * not currently linked on m_active_cmdq.
             */
            if (cmd->cmd_active_expiration != 0) {
                TAILQ_REMOVE(&ptgt->m_active_cmdq, cmd,
                    cmd_active_link);
                cmd->cmd_active_expiration = 0;
            }
        }
    }

    /*
     * This is all we need to do for ioc commands.
     */
    if (cmd->cmd_flags & CFLAG_CMDIOC) {
        mptsas_return_to_pool(mpt, cmd);
        return;
    }

    /* Sanity: the slot must no longer point at this command. */
    ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
}
8122 8128
8123 8129 /*
8124 8130 * accept all cmds on the tx_waitq if any and then
8125 8131 * start a fresh request from the top of the device queue.
8126 8132 *
8127 8133 * since there are always cmds queued on the tx_waitq, and rare cmds on
8128 8134 * the instance waitq, so this function should not be invoked in the ISR,
8129 8135 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
8130 8136 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
8131 8137 */
8132 8138 static void
8133 8139 mptsas_restart_hba(mptsas_t *mpt)
8134 8140 {
8135 8141 ASSERT(mutex_owned(&mpt->m_mutex));
8136 8142
8137 8143 mutex_enter(&mpt->m_tx_waitq_mutex);
8138 8144 if (mpt->m_tx_waitq) {
8139 8145 mptsas_accept_tx_waitq(mpt);
8140 8146 }
8141 8147 mutex_exit(&mpt->m_tx_waitq_mutex);
8142 8148 mptsas_restart_waitq(mpt);
8143 8149 }
8144 8150
8145 8151 /*
8146 8152 * start a fresh request from the top of the device queue
8147 8153 */
static void
mptsas_restart_waitq(mptsas_t *mpt)
{
	mptsas_cmd_t	*cmd, *next_cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * If there is a reset delay, don't start any cmds. Otherwise, start
	 * as many cmds as possible.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	cmd = mpt->m_waitq;

	while (cmd != NULL) {
		/* Remember the successor; cmd may leave the waitq below. */
		next_cmd = cmd->cmd_linkp;
		if (cmd->cmd_flags & CFLAG_PASSTHRU) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * passthru command get slot need
				 * set CFLAG_PREPARED.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_passthru(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}
		if (cmd->cmd_flags & CFLAG_CONFIG) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * Send the config page request and delete it
				 * from the waitq.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_config_page_access(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}
		if (cmd->cmd_flags & CFLAG_FW_DIAG) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * Send the FW Diag request and delete if from
				 * the waitq.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_diag(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}

		/*
		 * Regular SCSI command: lift a drained target back to
		 * full throttle once it has no commands in flight.
		 */
		ptgt = cmd->cmd_tgt_addr;
		if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
		    (ptgt->m_t_ncmds == 0)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		}
		/*
		 * Start the command only if the HBA has a free slot, the
		 * target is not in a reset delay, and the target is below
		 * its throttle.
		 */
		if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
		    (ptgt && (ptgt->m_reset_delay == 0)) &&
		    (ptgt && (ptgt->m_t_ncmds <
		    ptgt->m_t_throttle))) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				mptsas_waitq_delete(mpt, cmd);
				(void) mptsas_start_cmd(mpt, cmd);
			}
		}
		cmd = next_cmd;
	}
}
/*
 * Commands are placed on the tx_waitq when tran_start() cannot acquire
 * m_mutex without waiting. Accept all of those queued commands before a
 * new command is accepted, so that commands are sent in order.
 */
8230 8236 static void
8231 8237 mptsas_accept_tx_waitq(mptsas_t *mpt)
8232 8238 {
8233 8239 mptsas_cmd_t *cmd;
8234 8240
8235 8241 ASSERT(mutex_owned(&mpt->m_mutex));
8236 8242 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
8237 8243
8238 8244 /*
8239 8245 * A Bus Reset could occur at any time and flush the tx_waitq,
8240 8246 * so we cannot count on the tx_waitq to contain even one cmd.
8241 8247 * And when the m_tx_waitq_mutex is released and run
8242 8248 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8243 8249 */
8244 8250 cmd = mpt->m_tx_waitq;
8245 8251 for (;;) {
8246 8252 if ((cmd = mpt->m_tx_waitq) == NULL) {
8247 8253 mpt->m_tx_draining = 0;
8248 8254 break;
8249 8255 }
8250 8256 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8251 8257 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8252 8258 }
8253 8259 cmd->cmd_linkp = NULL;
8254 8260 mutex_exit(&mpt->m_tx_waitq_mutex);
8255 8261 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8256 8262 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8257 8263 "to accept cmd on queue\n");
8258 8264 mutex_enter(&mpt->m_tx_waitq_mutex);
8259 8265 }
8260 8266 }
8261 8267
8262 8268
8263 8269 /*
8264 8270 * mpt tag type lookup
8265 8271 */
/*
 * Indexed by (cmd_pkt_flags & FLAG_TAGMASK) >> 12 in mptsas_start_cmd()
 * to map packet tag flags to SCSI queue-tag message codes; the zero
 * entries are unused encodings.
 */
static char mptsas_tag_lookup[] =
	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8268 8274
/*
 * Build the MPI2 SCSI IO request frame for cmd in its pre-assigned slot
 * and post it to the IOC's request descriptor register, then arm the
 * command's timeout and insert it into the target's expiration-ordered
 * active command queue.  Returns DDI_SUCCESS, or DDI_FAILURE if the
 * command was deferred (untagged-drain case) or an FMA check failed.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	caddr_t			mem;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint32_t		request_desc_low, request_desc_high;
	mptsas_cmd_t		*c;

	NDBG1(("mptsas_start_cmd: cmd=0x%p, flags 0x%x", (void *)cmd,
	    cmd->cmd_flags));

	/*
	 * The SMID is simply the command's pre-assigned slot number;
	 * slot/SMID 0 is invalid, so slots start at 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired. That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/* Requeue at the head; it restarts after the drain. */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		/* Untagged: serialize the target (except REQUEST SENSE). */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	/* Locate this SMID's request frame and fill in the IO request. */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;

	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	/* Use the fast-path descriptor when enabled and supported. */
	if (mptsas_use_fastpath &&
	    ptgt->m_io_flags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
		io_flags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		request_desc_low = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	} else {
		request_desc_low = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	}
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* No data transfer: single empty end-of-list SGE. */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
	if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
	    (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_ext_arqcookie.dmac_address);
	} else {
		ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
		    cmd->cmd_arqcookie.dmac_address);
	}

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=%d(0x%p), with cmd=0x%p",
	    SMID, (void *)io_request, (void *)cmd));

	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc_low |= (SMID << 16);
	request_desc_high = ptgt->m_devhdl << 16;
	MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);

	/*
	 * Start timeout.
	 */
	cmd->cmd_active_expiration =
	    gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
#ifdef MPTSAS_TEST
	/*
	 * Force timeouts to happen immediately.
	 */
	if (mptsas_test_timeouts)
		cmd->cmd_active_expiration = gethrtime();
#endif
	/*
	 * Insert into m_active_cmdq, which is kept sorted with the
	 * latest expiration at the head.
	 */
	c = TAILQ_FIRST(&ptgt->m_active_cmdq);
	if (c == NULL ||
	    c->cmd_active_expiration < cmd->cmd_active_expiration) {
		/*
		 * Common case is that this is the last pending expiration
		 * (or queue is empty). Insert at head of the queue.
		 */
		TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
	} else {
		/*
		 * Queue is not empty and first element expires later than
		 * this command. Search for element expiring sooner.
		 */
		while ((c = TAILQ_NEXT(c, cmd_active_link)) != NULL) {
			if (c->cmd_active_expiration <
			    cmd->cmd_active_expiration) {
				TAILQ_INSERT_BEFORE(c, cmd, cmd_active_link);
				break;
			}
		}
		if (c == NULL) {
			/*
			 * No element found expiring sooner, append to
			 * non-empty queue.
			 */
			TAILQ_INSERT_TAIL(&ptgt->m_active_cmdq, cmd,
			    cmd_active_link);
		}
	}

	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
8465 8471
8466 8472 /*
8467 8473 * Select a helper thread to handle current doneq
8468 8474 */
8469 8475 static void
8470 8476 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8471 8477 {
8472 8478 uint64_t t, i;
8473 8479 uint32_t min = 0xffffffff;
8474 8480 mptsas_doneq_thread_list_t *item;
8475 8481
8476 8482 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8477 8483 item = &mpt->m_doneq_thread_id[i];
8478 8484 /*
8479 8485 * If the completed command on help thread[i] less than
8480 8486 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8481 8487 * pick a thread which has least completed command.
8482 8488 */
8483 8489
8484 8490 mutex_enter(&item->mutex);
8485 8491 if (item->len < mpt->m_doneq_thread_threshold) {
8486 8492 t = i;
8487 8493 mutex_exit(&item->mutex);
8488 8494 break;
8489 8495 }
8490 8496 if (item->len < min) {
8491 8497 min = item->len;
8492 8498 t = i;
8493 8499 }
8494 8500 mutex_exit(&item->mutex);
8495 8501 }
8496 8502 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8497 8503 mptsas_doneq_mv(mpt, t);
8498 8504 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8499 8505 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8500 8506 }
8501 8507
/*
 * Move the current global doneq to the doneq of thread[t].
 */
8505 8511 static void
8506 8512 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8507 8513 {
8508 8514 mptsas_cmd_t *cmd;
8509 8515 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8510 8516
8511 8517 ASSERT(mutex_owned(&item->mutex));
8512 8518 while ((cmd = mpt->m_doneq) != NULL) {
8513 8519 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8514 8520 mpt->m_donetail = &mpt->m_doneq;
8515 8521 }
8516 8522 cmd->cmd_linkp = NULL;
8517 8523 *item->donetail = cmd;
8518 8524 item->donetail = &cmd->cmd_linkp;
8519 8525 mpt->m_doneq_len--;
8520 8526 item->len++;
8521 8527 }
8522 8528 }
8523 8529
/*
 * Run FMA checks on every access and DMA handle associated with the
 * adapter and this command.  On any failed handle, report the fault as
 * DDI_SERVICE_UNAFFECTED and mark the packet with CMD_TRAN_ERR so the
 * target driver sees a transport error.
 */
void
mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	/* Check all acc and dma handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		/* Clear the config-space access error before continuing. */
		ddi_fm_acc_err_clear(mpt->m_config_handle,
		    DDI_FME_VER0);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Per-command handles may be absent; check only when present. */
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if (cmd->cmd_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if (cmd->cmd_ext_arqhandle &&
	    (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
}
8594 8600
8595 8601 /*
8596 8602 * These routines manipulate the queue of commands that
8597 8603 * are waiting for their completion routines to be called.
8598 8604 * The queue is usually in FIFO order but on an MP system
8599 8605 * it's possible for the completion routines to get out
8600 8606 * of order. If that's a problem you need to add a global
8601 8607 * mutex around the code that calls the completion routine
8602 8608 * in the interrupt handler.
8603 8609 */
8604 8610 static void
8605 8611 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8606 8612 {
8607 8613 struct scsi_pkt *pkt = CMD2PKT(cmd);
8608 8614
8609 8615 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8610 8616
8611 8617 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8612 8618 cmd->cmd_linkp = NULL;
8613 8619 cmd->cmd_flags |= CFLAG_FINISHED;
8614 8620 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8615 8621
8616 8622 mptsas_fma_check(mpt, cmd);
8617 8623
8618 8624 /*
8619 8625 * only add scsi pkts that have completion routines to
8620 8626 * the doneq. no intr cmds do not have callbacks.
8621 8627 */
8622 8628 if (pkt && (pkt->pkt_comp)) {
8623 8629 *mpt->m_donetail = cmd;
8624 8630 mpt->m_donetail = &cmd->cmd_linkp;
8625 8631 mpt->m_doneq_len++;
8626 8632 }
8627 8633 }
8628 8634
8629 8635 static mptsas_cmd_t *
8630 8636 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8631 8637 {
8632 8638 mptsas_cmd_t *cmd;
8633 8639 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8634 8640
8635 8641 /* pop one off the done queue */
8636 8642 if ((cmd = item->doneq) != NULL) {
8637 8643 /* if the queue is now empty fix the tail pointer */
8638 8644 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8639 8645 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8640 8646 item->donetail = &item->doneq;
8641 8647 }
8642 8648 cmd->cmd_linkp = NULL;
8643 8649 item->len--;
8644 8650 }
8645 8651 return (cmd);
8646 8652 }
8647 8653
/*
 * Drain the global doneq, running each command's completion routine
 * with m_mutex dropped.  m_in_callback prevents reentry while the
 * callbacks run (a callback may call back into the driver).
 */
static void
mptsas_doneq_empty(mptsas_t *mpt)
{
	if (mpt->m_doneq && !mpt->m_in_callback) {
		mptsas_cmd_t	*cmd, *next;
		struct scsi_pkt *pkt;

		mpt->m_in_callback = 1;
		/* Detach the whole queue before dropping the mutex. */
		cmd = mpt->m_doneq;
		mpt->m_doneq = NULL;
		mpt->m_donetail = &mpt->m_doneq;
		mpt->m_doneq_len = 0;

		mutex_exit(&mpt->m_mutex);
		/*
		 * run the completion routines of all the
		 * completed commands
		 */
		while (cmd != NULL) {
			next = cmd->cmd_linkp;
			cmd->cmd_linkp = NULL;
			/* run this command's completion routine */
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
			mptsas_pkt_comp(pkt, cmd);
			cmd = next;
		}
		mutex_enter(&mpt->m_mutex);
		mpt->m_in_callback = 0;
	}
}
8679 8685
8680 8686 /*
8681 8687 * These routines manipulate the target's queue of pending requests
8682 8688 */
8683 8689 void
8684 8690 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8685 8691 {
8686 8692 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8687 8693 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8688 8694 cmd->cmd_queued = TRUE;
8689 8695 if (ptgt)
8690 8696 ptgt->m_t_nwait++;
8691 8697 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8692 8698 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8693 8699 mpt->m_waitqtail = &cmd->cmd_linkp;
8694 8700 }
8695 8701 mpt->m_waitq = cmd;
8696 8702 } else {
8697 8703 cmd->cmd_linkp = NULL;
8698 8704 *(mpt->m_waitqtail) = cmd;
8699 8705 mpt->m_waitqtail = &cmd->cmd_linkp;
8700 8706 }
8701 8707 }
8702 8708
/*
 * Remove and return the head of the adapter wait queue (NULL if the
 * queue is empty), decrementing the target's waiting-command count.
 */
static mptsas_cmd_t *
mptsas_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t	*cmd;
	mptsas_target_t *ptgt;
	NDBG7(("mptsas_waitq_rm"));

	/* Macro unlinks the head of m_waitq into cmd (may be NULL). */
	MPTSAS_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
	if (cmd) {
		ptgt = cmd->cmd_tgt_addr;
		if (ptgt) {
			ptgt->m_t_nwait--;
			ASSERT(ptgt->m_t_nwait >= 0);
		}
	}
	return (cmd);
}
8722 8728
8723 8729 /*
8724 8730 * remove specified cmd from the middle of the wait queue.
8725 8731 */
8726 8732 static void
8727 8733 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8728 8734 {
8729 8735 mptsas_cmd_t *prevp = mpt->m_waitq;
8730 8736 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8731 8737
8732 8738 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8733 8739 (void *)mpt, (void *)cmd));
8734 8740 if (ptgt) {
8735 8741 ptgt->m_t_nwait--;
8736 8742 ASSERT(ptgt->m_t_nwait >= 0);
8737 8743 }
8738 8744
8739 8745 if (prevp == cmd) {
8740 8746 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8741 8747 mpt->m_waitqtail = &mpt->m_waitq;
8742 8748
8743 8749 cmd->cmd_linkp = NULL;
8744 8750 cmd->cmd_queued = FALSE;
8745 8751 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8746 8752 (void *)mpt, (void *)cmd));
8747 8753 return;
8748 8754 }
8749 8755
8750 8756 while (prevp != NULL) {
8751 8757 if (prevp->cmd_linkp == cmd) {
8752 8758 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8753 8759 mpt->m_waitqtail = &prevp->cmd_linkp;
8754 8760
8755 8761 cmd->cmd_linkp = NULL;
8756 8762 cmd->cmd_queued = FALSE;
8757 8763 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8758 8764 (void *)mpt, (void *)cmd));
8759 8765 return;
8760 8766 }
8761 8767 prevp = prevp->cmd_linkp;
8762 8768 }
8763 8769 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8764 8770 }
8765 8771
/*
 * Remove and return the head of the tx_waitq (NULL if empty).
 */
static mptsas_cmd_t *
mptsas_tx_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t *cmd;
	NDBG7(("mptsas_tx_waitq_rm"));

	/* Macro unlinks the head of m_tx_waitq into cmd (may be NULL). */
	MPTSAS_TX_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));

	return (cmd);
}
8778 8784
8779 8785 /*
8780 8786 * remove specified cmd from the middle of the tx_waitq.
8781 8787 */
8782 8788 static void
8783 8789 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8784 8790 {
8785 8791 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8786 8792
8787 8793 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8788 8794 (void *)mpt, (void *)cmd));
8789 8795
8790 8796 if (prevp == cmd) {
8791 8797 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8792 8798 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8793 8799
8794 8800 cmd->cmd_linkp = NULL;
8795 8801 cmd->cmd_queued = FALSE;
8796 8802 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8797 8803 (void *)mpt, (void *)cmd));
8798 8804 return;
8799 8805 }
8800 8806
8801 8807 while (prevp != NULL) {
8802 8808 if (prevp->cmd_linkp == cmd) {
8803 8809 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8804 8810 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8805 8811
8806 8812 cmd->cmd_linkp = NULL;
8807 8813 cmd->cmd_queued = FALSE;
8808 8814 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8809 8815 (void *)mpt, (void *)cmd));
8810 8816 return;
8811 8817 }
8812 8818 prevp = prevp->cmd_linkp;
8813 8819 }
8814 8820 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8815 8821 }
8816 8822
8817 8823 /*
8818 8824 * device and bus reset handling
8819 8825 *
8820 8826 * Notes:
8821 8827 * - RESET_ALL: reset the controller
8822 8828 * - RESET_TARGET: reset the target specified in scsi_address
8823 8829 */
/*
 * tran_reset(9E) entry point.  Resets the target addressed by ap (the
 * 'level' argument is logged but the reset is always issued per-target
 * here) and returns TRUE/FALSE as the transport layer expects.
 */
static int
mptsas_scsi_reset(struct scsi_address *ap, int level)
{
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	mptsas_target_t		*ptgt = NULL;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
	ptgt = tgt_private->t_private;
	if (ptgt == NULL) {
		return (FALSE);
	}
	NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
	    level));

	mutex_enter(&mpt->m_mutex);
	/*
	 * if we are not in panic set up a reset delay for this target
	 */
	if (!ddi_in_panic()) {
		mptsas_setup_bus_reset_delay(mpt);
	} else {
		/* In panic context there is no timeout; busy-wait instead. */
		drv_usecwait(mpt->m_scsi_reset_delay * 1000);
	}
	rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
	mutex_exit(&mpt->m_mutex);

	/*
	 * The transport layer expect to only see TRUE and
	 * FALSE. Therefore, we will adjust the return value
	 * if mptsas_do_scsi_reset returns FAILED.
	 */
	if (rval == FAILED)
		rval = FALSE;
	return (rval);
}
8861 8867
/*
 * Issue a Target Reset task management request to devhdl, unless the
 * device handle belongs to a physical disk that is part of a RAID
 * volume (resetting a member disk is skipped and treated as success).
 * Caller must hold m_mutex.
 */
static int
mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
{
	int		rval = FALSE;
	uint8_t		config, disk;

	ASSERT(mutex_owned(&mpt->m_mutex));

	if (mptsas_debug_resets) {
		mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
		    devhdl);
	}

	/*
	 * Issue a Target Reset message to the target specified but not to a
	 * disk making up a raid volume. Just look through the RAID config
	 * Phys Disk list of DevHandles. If the target's DevHandle is in this
	 * list, then don't reset this target.
	 */
	for (config = 0; config < mpt->m_num_raid_configs; config++) {
		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
			if (devhdl == mpt->m_raidconfig[config].
			    m_physdisk_devhdl[disk]) {
				return (TRUE);
			}
		}
	}

	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);

	/* Complete any commands flushed back by the reset. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
8896 8902
/*
 * tran_reset_notify(9E) entry point: register or unregister a callback
 * to be invoked on bus reset, via the common SCSA helper.
 */
static int
mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));

	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &mpt->m_mutex, &mpt->m_reset_notify_listf));
}
8908 8914
8909 8915 static int
8910 8916 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8911 8917 {
8912 8918 dev_info_t *lun_dip = NULL;
8913 8919
8914 8920 ASSERT(sd != NULL);
8915 8921 ASSERT(name != NULL);
8916 8922 lun_dip = sd->sd_dev;
8917 8923 ASSERT(lun_dip != NULL);
8918 8924
8919 8925 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8920 8926 return (1);
8921 8927 } else {
8922 8928 return (0);
8923 8929 }
8924 8930 }
8925 8931
/*
 * tran_get_bus_addr(9E): the bus address is identical to the unit
 * name, so simply delegate to mptsas_get_name().
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
8931 8937
8932 8938 void
8933 8939 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
8934 8940 {
8935 8941
8936 8942 NDBG25(("mptsas_set_throttle: throttle=%x", what));
8937 8943
8938 8944 /*
8939 8945 * if the bus is draining/quiesced, no changes to the throttles
8940 8946 * are allowed. Not allowing change of throttles during draining
8941 8947 * limits error recovery but will reduce draining time
8942 8948 *
8943 8949 * all throttles should have been set to HOLD_THROTTLE
8944 8950 */
8945 8951 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
8946 8952 return;
8947 8953 }
8948 8954
8949 8955 if (what == HOLD_THROTTLE) {
8950 8956 ptgt->m_t_throttle = HOLD_THROTTLE;
8951 8957 } else if (ptgt->m_reset_delay == 0) {
8952 8958 ptgt->m_t_throttle = what;
8953 8959 }
8954 8960 }
8955 8961
/*
 * Clean up from a device reset.
 * For the case of target reset, this function clears the waitq of all
 * commands for a particular target. For the case of abort task set, this
 * function clears the waitq of all commands for a particular target/lun.
 */
static void
mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd, *next_cmd;
	int		slot;
	uchar_t		reason;
	uint_t		stat;
	hrtime_t	timestamp;

	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));

	/* Snapshot now so timed-out commands can be told apart below. */
	timestamp = gethrtime();

	/*
	 * Make sure the I/O Controller has flushed all cmds
	 * that are associated with this target for a target reset
	 * and target/lun for abort task set.
	 * Account for TM requests, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;
		reason = CMD_RESET;
		stat = STAT_DEV_RESET;
		switch (tasktype) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			if (Tgt(cmd) == target) {
				if (cmd->cmd_active_expiration <= timestamp) {
					/*
					 * When timeout requested, propagate
					 * proper reason and statistics to
					 * target drivers.
					 */
					reason = CMD_TIMEOUT;
					stat |= STAT_TIMEOUT;
				}
				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
			/* Abort-task-set reports CMD_ABORTED instead. */
			reason = CMD_ABORTED;
			stat = STAT_ABORTED;
			/*FALLTHROUGH*/
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason,
				    stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Flush the waitq and tx_waitq of this target's cmds
	 */
	cmd = mpt->m_waitq;

	reason = CMD_RESET;
	stat = STAT_DEV_RESET;

	switch (tasktype) {
	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_tx_waitq_delete(mpt, cmd);
				/*
				 * Drop the tx_waitq lock around doneq_add;
				 * next_cmd was captured before the unlink.
				 */
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		reason = CMD_ABORTED;
		stat = STAT_ABORTED;
		/*FALLTHROUGH*/
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_tx_waitq_delete(mpt, cmd);
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
		    tasktype);
		break;
	}
}
9103 9109
9104 9110 /*
9105 9111 * Clean up hba state, abort all outstanding command and commands in waitq
9106 9112 * reset timeout of all targets.
9107 9113 */
static void
mptsas_flush_hba(mptsas_t *mpt)
{
	mptsas_slots_t *slots = mpt->m_active;
	mptsas_cmd_t *cmd;
	int slot;

	/*
	 * Called with m_mutex held (it is dropped and re-acquired below
	 * around the waitq drain and the taskq drain).
	 */
	NDBG25(("mptsas_flush_hba"));

	/*
	 * The I/O Controller should have already sent back
	 * all commands via the scsi I/O reply frame.  Make
	 * sure all commands have been flushed.
	 * Account for TM request, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;

		if (cmd->cmd_flags & CFLAG_CMDIOC) {
			/*
			 * Need to make sure to tell everyone that might be
			 * waiting on this command that it's going to fail.  If
			 * we get here, this command will never timeout because
			 * the active command table is going to be re-allocated,
			 * so there will be nothing to check against a time out.
			 * Instead, mark the command as failed due to reset.
			 */
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if ((cmd->cmd_flags &
			    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
				/*
				 * Internal commands have a thread blocked in
				 * cv_wait(); CFLAG_FINISHED plus the broadcast
				 * wakes it so it can observe the failure.
				 */
				cmd->cmd_flags |= CFLAG_FINISHED;
				cv_broadcast(&mpt->m_passthru_cv);
				cv_broadcast(&mpt->m_config_cv);
				cv_broadcast(&mpt->m_fw_diag_cv);
			}
			continue;
		}

		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
		    slot));
		mptsas_dump_cmd(mpt, cmd);

		/* Normal SCSI command: fail it back through the doneq. */
		mptsas_remove_cmd(mpt, cmd);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
	}

	/*
	 * Flush the waitq.
	 */
	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
		} else {
			mptsas_doneq_add(mpt, cmd);
		}
	}

	/*
	 * Flush the tx_waitq
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
		/*
		 * m_tx_waitq_mutex is dropped around the completion calls;
		 * the lock ordering here mirrors the ABORT/RESET task
		 * management flush above.
		 */
		mutex_exit(&mpt->m_tx_waitq_mutex);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
		mutex_enter(&mpt->m_tx_waitq_mutex);
	}
	mutex_exit(&mpt->m_tx_waitq_mutex);

	/*
	 * Drain the taskqs prior to reallocating resources.
	 * m_mutex must be released here since taskq callbacks take it.
	 */
	mutex_exit(&mpt->m_mutex);
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);
	mutex_enter(&mpt->m_mutex);
}
9194 9200
9195 9201 /*
9196 9202 * set pkt_reason and OR in pkt_statistics flag
9197 9203 */
9198 9204 static void
9199 9205 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9200 9206 uint_t stat)
9201 9207 {
9202 9208 #ifndef __lock_lint
9203 9209 _NOTE(ARGUNUSED(mpt))
9204 9210 #endif
9205 9211
9206 9212 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9207 9213 (void *)cmd, reason, stat));
9208 9214
9209 9215 if (cmd) {
9210 9216 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9211 9217 cmd->cmd_pkt->pkt_reason = reason;
9212 9218 }
9213 9219 cmd->cmd_pkt->pkt_statistics |= stat;
9214 9220 }
9215 9221 }
9216 9222
9217 9223 static void
9218 9224 mptsas_start_watch_reset_delay()
9219 9225 {
9220 9226 NDBG22(("mptsas_start_watch_reset_delay"));
9221 9227
9222 9228 mutex_enter(&mptsas_global_mutex);
9223 9229 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
9224 9230 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
9225 9231 drv_usectohz((clock_t)
9226 9232 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
9227 9233 ASSERT(mptsas_reset_watch != NULL);
9228 9234 }
9229 9235 mutex_exit(&mptsas_global_mutex);
9230 9236 }
9231 9237
9232 9238 static void
9233 9239 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
9234 9240 {
9235 9241 mptsas_target_t *ptgt = NULL;
9236 9242
9237 9243 ASSERT(MUTEX_HELD(&mpt->m_mutex));
9238 9244
9239 9245 NDBG22(("mptsas_setup_bus_reset_delay"));
9240 9246 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9241 9247 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9242 9248 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9243 9249 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
9244 9250 }
9245 9251
9246 9252 mptsas_start_watch_reset_delay();
9247 9253 }
9248 9254
9249 9255 /*
9250 9256 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9251 9257 * mpt instance for active reset delays
9252 9258 */
9253 9259 static void
9254 9260 mptsas_watch_reset_delay(void *arg)
9255 9261 {
9256 9262 #ifndef __lock_lint
9257 9263 _NOTE(ARGUNUSED(arg))
9258 9264 #endif
9259 9265
9260 9266 mptsas_t *mpt;
9261 9267 int not_done = 0;
9262 9268
9263 9269 NDBG22(("mptsas_watch_reset_delay"));
9264 9270
9265 9271 mutex_enter(&mptsas_global_mutex);
9266 9272 mptsas_reset_watch = 0;
9267 9273 mutex_exit(&mptsas_global_mutex);
9268 9274 rw_enter(&mptsas_global_rwlock, RW_READER);
9269 9275 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9270 9276 if (mpt->m_tran == 0) {
9271 9277 continue;
9272 9278 }
9273 9279 mutex_enter(&mpt->m_mutex);
9274 9280 not_done += mptsas_watch_reset_delay_subr(mpt);
9275 9281 mutex_exit(&mpt->m_mutex);
9276 9282 }
9277 9283 rw_exit(&mptsas_global_rwlock);
9278 9284
9279 9285 if (not_done) {
9280 9286 mptsas_start_watch_reset_delay();
9281 9287 }
9282 9288 }
9283 9289
9284 9290 static int
9285 9291 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
9286 9292 {
9287 9293 int done = 0;
9288 9294 int restart = 0;
9289 9295 mptsas_target_t *ptgt = NULL;
9290 9296
9291 9297 NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
9292 9298
9293 9299 ASSERT(mutex_owned(&mpt->m_mutex));
9294 9300
9295 9301 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9296 9302 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9297 9303 if (ptgt->m_reset_delay != 0) {
9298 9304 ptgt->m_reset_delay -=
9299 9305 MPTSAS_WATCH_RESET_DELAY_TICK;
9300 9306 if (ptgt->m_reset_delay <= 0) {
9301 9307 ptgt->m_reset_delay = 0;
9302 9308 mptsas_set_throttle(mpt, ptgt,
9303 9309 MAX_THROTTLE);
9304 9310 restart++;
9305 9311 } else {
9306 9312 done = -1;
9307 9313 }
9308 9314 }
9309 9315 }
9310 9316
9311 9317 if (restart > 0) {
9312 9318 mptsas_restart_hba(mpt);
9313 9319 }
9314 9320 return (done);
9315 9321 }
9316 9322
#ifdef MPTSAS_TEST
/*
 * Test hook: when the global trigger mptsas_rtest is set to a target's
 * devhdl, issue a SCSI reset to that target and clear the trigger on
 * success.  (The previously declared local ptgt was unused; removed.)
 */
static void
mptsas_test_reset(mptsas_t *mpt, int target)
{
	if (mptsas_rtest == target) {
		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
			mptsas_rtest = -1;
		}
		if (mptsas_rtest == -1) {
			NDBG22(("mptsas_test_reset success"));
		}
	}
}
#endif
9333 9339
9334 9340 /*
9335 9341 * abort handling:
9336 9342 *
9337 9343 * Notes:
9338 9344 * - if pkt is not NULL, abort just that command
9339 9345 * - if pkt is NULL, abort all outstanding commands for target
9340 9346 */
9341 9347 static int
9342 9348 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9343 9349 {
9344 9350 mptsas_t *mpt = ADDR2MPT(ap);
9345 9351 int rval;
9346 9352 mptsas_tgt_private_t *tgt_private;
9347 9353 int target, lun;
9348 9354
9349 9355 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9350 9356 tran_tgt_private;
9351 9357 ASSERT(tgt_private != NULL);
9352 9358 target = tgt_private->t_private->m_devhdl;
9353 9359 lun = tgt_private->t_lun;
9354 9360
9355 9361 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9356 9362
9357 9363 mutex_enter(&mpt->m_mutex);
9358 9364 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9359 9365 mutex_exit(&mpt->m_mutex);
9360 9366 return (rval);
9361 9367 }
9362 9368
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t *sp = NULL;
	mptsas_slots_t *slots = mpt->m_active;
	int rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		if (sp->cmd_queued) {
			/*
			 * Still on the wait queue, never reached the
			 * hardware: complete it locally as CMD_ABORTED.
			 */
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Run completion callbacks for anything queued to the doneq above. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
9442 9448
9443 9449 /*
9444 9450 * capability handling:
9445 9451 * (*tran_getcap). Get the capability named, and return its value.
9446 9452 */
9447 9453 static int
9448 9454 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9449 9455 {
9450 9456 mptsas_t *mpt = ADDR2MPT(ap);
9451 9457 int ckey;
9452 9458 int rval = FALSE;
9453 9459
9454 9460 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9455 9461 ap->a_target, cap, tgtonly));
9456 9462
9457 9463 mutex_enter(&mpt->m_mutex);
9458 9464
9459 9465 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9460 9466 mutex_exit(&mpt->m_mutex);
9461 9467 return (UNDEFINED);
9462 9468 }
9463 9469
9464 9470 switch (ckey) {
9465 9471 case SCSI_CAP_DMA_MAX:
9466 9472 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9467 9473 break;
9468 9474 case SCSI_CAP_ARQ:
9469 9475 rval = TRUE;
9470 9476 break;
9471 9477 case SCSI_CAP_MSG_OUT:
9472 9478 case SCSI_CAP_PARITY:
9473 9479 case SCSI_CAP_UNTAGGED_QING:
9474 9480 rval = TRUE;
9475 9481 break;
9476 9482 case SCSI_CAP_TAGGED_QING:
9477 9483 rval = TRUE;
9478 9484 break;
9479 9485 case SCSI_CAP_RESET_NOTIFICATION:
9480 9486 rval = TRUE;
9481 9487 break;
9482 9488 case SCSI_CAP_LINKED_CMDS:
9483 9489 rval = FALSE;
9484 9490 break;
9485 9491 case SCSI_CAP_QFULL_RETRIES:
9486 9492 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9487 9493 tran_tgt_private))->t_private->m_qfull_retries;
9488 9494 break;
9489 9495 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9490 9496 rval = drv_hztousec(((mptsas_tgt_private_t *)
9491 9497 (ap->a_hba_tran->tran_tgt_private))->
9492 9498 t_private->m_qfull_retry_interval) / 1000;
9493 9499 break;
9494 9500 case SCSI_CAP_CDB_LEN:
9495 9501 rval = CDB_GROUP4;
9496 9502 break;
9497 9503 case SCSI_CAP_INTERCONNECT_TYPE:
9498 9504 rval = INTERCONNECT_SAS;
9499 9505 break;
9500 9506 case SCSI_CAP_TRAN_LAYER_RETRIES:
9501 9507 if (mpt->m_ioc_capabilities &
9502 9508 MPI2_IOCFACTS_CAPABILITY_TLR)
9503 9509 rval = TRUE;
9504 9510 else
9505 9511 rval = FALSE;
9506 9512 break;
9507 9513 default:
9508 9514 rval = UNDEFINED;
9509 9515 break;
9510 9516 }
9511 9517
9512 9518 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9513 9519
9514 9520 mutex_exit(&mpt->m_mutex);
9515 9521 return (rval);
9516 9522 }
9517 9523
9518 9524 /*
9519 9525 * (*tran_setcap). Set the capability named to the value given.
9520 9526 */
9521 9527 static int
9522 9528 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9523 9529 {
9524 9530 mptsas_t *mpt = ADDR2MPT(ap);
9525 9531 int ckey;
9526 9532 int rval = FALSE;
9527 9533
9528 9534 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9529 9535 ap->a_target, cap, value, tgtonly));
9530 9536
9531 9537 if (!tgtonly) {
9532 9538 return (rval);
9533 9539 }
9534 9540
9535 9541 mutex_enter(&mpt->m_mutex);
9536 9542
9537 9543 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9538 9544 mutex_exit(&mpt->m_mutex);
9539 9545 return (UNDEFINED);
9540 9546 }
9541 9547
9542 9548 switch (ckey) {
9543 9549 case SCSI_CAP_DMA_MAX:
9544 9550 case SCSI_CAP_MSG_OUT:
9545 9551 case SCSI_CAP_PARITY:
9546 9552 case SCSI_CAP_INITIATOR_ID:
9547 9553 case SCSI_CAP_LINKED_CMDS:
9548 9554 case SCSI_CAP_UNTAGGED_QING:
9549 9555 case SCSI_CAP_RESET_NOTIFICATION:
9550 9556 /*
9551 9557 * None of these are settable via
9552 9558 * the capability interface.
9553 9559 */
9554 9560 break;
9555 9561 case SCSI_CAP_ARQ:
9556 9562 /*
9557 9563 * We cannot turn off arq so return false if asked to
9558 9564 */
9559 9565 if (value) {
9560 9566 rval = TRUE;
9561 9567 } else {
9562 9568 rval = FALSE;
9563 9569 }
9564 9570 break;
9565 9571 case SCSI_CAP_TAGGED_QING:
9566 9572 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9567 9573 (ap->a_hba_tran->tran_tgt_private))->t_private,
9568 9574 MAX_THROTTLE);
9569 9575 rval = TRUE;
9570 9576 break;
9571 9577 case SCSI_CAP_QFULL_RETRIES:
9572 9578 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9573 9579 t_private->m_qfull_retries = (uchar_t)value;
9574 9580 rval = TRUE;
9575 9581 break;
9576 9582 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9577 9583 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9578 9584 t_private->m_qfull_retry_interval =
9579 9585 drv_usectohz(value * 1000);
9580 9586 rval = TRUE;
9581 9587 break;
9582 9588 default:
9583 9589 rval = UNDEFINED;
9584 9590 break;
9585 9591 }
9586 9592 mutex_exit(&mpt->m_mutex);
9587 9593 return (rval);
9588 9594 }
9589 9595
9590 9596 /*
9591 9597 * Utility routine for mptsas_ifsetcap/ifgetcap
9592 9598 */
9593 9599 /*ARGSUSED*/
9594 9600 static int
9595 9601 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9596 9602 {
9597 9603 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9598 9604
9599 9605 if (!cap)
9600 9606 return (FALSE);
9601 9607
9602 9608 *cidxp = scsi_hba_lookup_capstr(cap);
9603 9609 return (TRUE);
9604 9610 }
9605 9611
9606 9612 static int
9607 9613 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
9608 9614 {
9609 9615 mptsas_slots_t *old_active = mpt->m_active;
9610 9616 mptsas_slots_t *new_active;
9611 9617 size_t size;
9612 9618
9613 9619 /*
9614 9620 * if there are active commands, then we cannot
9615 9621 * change size of active slots array.
9616 9622 */
9617 9623 ASSERT(mpt->m_ncmds == 0);
9618 9624
9619 9625 size = MPTSAS_SLOTS_SIZE(mpt);
9620 9626 new_active = kmem_zalloc(size, flag);
9621 9627 if (new_active == NULL) {
9622 9628 NDBG1(("new active alloc failed"));
9623 9629 return (-1);
9624 9630 }
9625 9631 /*
9626 9632 * Since SMID 0 is reserved and the TM slot is reserved, the
9627 9633 * number of slots that can be used at any one time is
9628 9634 * m_max_requests - 2.
9629 9635 */
9630 9636 new_active->m_n_normal = (mpt->m_max_requests - 2);
9631 9637 new_active->m_size = size;
9632 9638 new_active->m_rotor = 1;
9633 9639 if (old_active)
9634 9640 mptsas_free_active_slots(mpt);
9635 9641 mpt->m_active = new_active;
9636 9642
9637 9643 return (0);
9638 9644 }
9639 9645
9640 9646 static void
9641 9647 mptsas_free_active_slots(mptsas_t *mpt)
9642 9648 {
9643 9649 mptsas_slots_t *active = mpt->m_active;
9644 9650 size_t size;
9645 9651
9646 9652 if (active == NULL)
9647 9653 return;
9648 9654 size = active->m_size;
9649 9655 kmem_free(active, size);
9650 9656 mpt->m_active = NULL;
9651 9657 }
9652 9658
9653 9659 /*
9654 9660 * Error logging, printing, and debug print routines.
9655 9661 */
9656 9662 static char *mptsas_label = "mpt_sas";
9657 9663
/*
 * Format and emit a driver log message via scsi_log().  A NULL mpt is
 * allowed (used before attach completes); the message is then logged
 * without a device reference.  mptsas_log_mutex serializes use of the
 * shared format buffer.
 */
/*PRINTFLIKE3*/
void
mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
{
	dev_info_t *dev;
	va_list ap;

	if (mpt) {
		dev = mpt->m_dip;
	} else {
		dev = 0;
	}

	mutex_enter(&mptsas_log_mutex);

	/*
	 * NOTE(review): vsprintf() does not bound the write into
	 * mptsas_log_buf; callers must keep formatted messages shorter
	 * than the buffer.  Consider vsnprintf() — confirm the buffer's
	 * declared size before changing.
	 */
	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

	if (level == CE_CONT) {
		/* CE_CONT output gets an explicit trailing newline. */
		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
	} else {
		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
	}

	mutex_exit(&mptsas_log_mutex);
}
9685 9691
9686 9692 #ifdef MPTSAS_DEBUG
9687 9693 /*
9688 9694 * Use a circular buffer to log messages to private memory.
9689 9695 * Increment idx atomically to minimize risk to miss lines.
9690 9696 * It's fast and does not hold up the proceedings too much.
9691 9697 */
9692 9698 static const size_t mptsas_dbglog_linecnt = MPTSAS_DBGLOG_LINECNT;
9693 9699 static const size_t mptsas_dbglog_linelen = MPTSAS_DBGLOG_LINELEN;
9694 9700 static char mptsas_dbglog_bufs[MPTSAS_DBGLOG_LINECNT][MPTSAS_DBGLOG_LINELEN];
9695 9701 static uint32_t mptsas_dbglog_idx = 0;
9696 9702
9697 9703 /*PRINTFLIKE1*/
9698 9704 void
9699 9705 mptsas_debug_log(char *fmt, ...)
9700 9706 {
9701 9707 va_list ap;
9702 9708 uint32_t idx;
9703 9709
9704 9710 idx = atomic_inc_32_nv(&mptsas_dbglog_idx) &
9705 9711 (mptsas_dbglog_linecnt - 1);
9706 9712
9707 9713 va_start(ap, fmt);
9708 9714 (void) vsnprintf(mptsas_dbglog_bufs[idx],
9709 9715 mptsas_dbglog_linelen, fmt, ap);
9710 9716 va_end(ap);
9711 9717 }
9712 9718
/*
 * Debug-build printf: format into the shared log buffer and emit via
 * prom_printf() when PROM_PRINTF is defined, otherwise via scsi_log()
 * at CE_CONT with the "!" prefix (console suppressed, msgbuf only).
 */
/*PRINTFLIKE1*/
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t *dev = 0;
	va_list ap;

	mutex_enter(&mptsas_log_mutex);

	/*
	 * NOTE(review): like mptsas_log(), this relies on the formatted
	 * message fitting in mptsas_log_buf — vsprintf is unbounded.
	 */
	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
9733 9739 #endif
9734 9740
9735 9741 /*
9736 9742 * timeout handling
9737 9743 */
9738 9744 static void
9739 9745 mptsas_watch(void *arg)
9740 9746 {
9741 9747 #ifndef __lock_lint
9742 9748 _NOTE(ARGUNUSED(arg))
9743 9749 #endif
9744 9750
9745 9751 mptsas_t *mpt;
9746 9752 uint32_t doorbell;
9747 9753
9748 9754 NDBG30(("mptsas_watch"));
9749 9755
9750 9756 rw_enter(&mptsas_global_rwlock, RW_READER);
9751 9757 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9752 9758
9753 9759 mutex_enter(&mpt->m_mutex);
9754 9760
9755 9761 /* Skip device if not powered on */
9756 9762 if (mpt->m_options & MPTSAS_OPT_PM) {
9757 9763 if (mpt->m_power_level == PM_LEVEL_D0) {
9758 9764 (void) pm_busy_component(mpt->m_dip, 0);
9759 9765 mpt->m_busy = 1;
9760 9766 } else {
9761 9767 mutex_exit(&mpt->m_mutex);
9762 9768 continue;
9763 9769 }
9764 9770 }
9765 9771
9766 9772 /*
9767 9773 * Check if controller is in a FAULT state. If so, reset it.
9768 9774 */
9769 9775 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9770 9776 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9771 9777 doorbell &= MPI2_DOORBELL_DATA_MASK;
9772 9778 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9773 9779 "code: %04x", doorbell);
9774 9780 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9775 9781 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9776 9782 mptsas_log(mpt, CE_WARN, "Reset failed"
9777 9783 "after fault was detected");
9778 9784 }
9779 9785 }
9780 9786
9781 9787 /*
9782 9788 * For now, always call mptsas_watchsubr.
9783 9789 */
9784 9790 mptsas_watchsubr(mpt);
9785 9791
9786 9792 if (mpt->m_options & MPTSAS_OPT_PM) {
9787 9793 mpt->m_busy = 0;
9788 9794 (void) pm_idle_component(mpt->m_dip, 0);
9789 9795 }
9790 9796
9791 9797 mutex_exit(&mpt->m_mutex);
9792 9798 }
9793 9799 rw_exit(&mptsas_global_rwlock);
9794 9800
9795 9801 mutex_enter(&mptsas_global_mutex);
9796 9802 if (mptsas_timeouts_enabled)
9797 9803 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9798 9804 mutex_exit(&mptsas_global_mutex);
9799 9805 }
9800 9806
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int i;
	mptsas_cmd_t *cmd;
	mptsas_target_t *ptgt = NULL;
	hrtime_t timestamp = gethrtime();

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if (cmd->cmd_active_expiration <= timestamp) {
				if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				} else if (cmd->cmd_flags &
				    (CFLAG_PASSTHRU | CFLAG_CONFIG |
				    CFLAG_FW_DIAG)) {
					/*
					 * passthrough command timeout
					 * (internal command: wake the thread
					 * blocked on its cv so it can see
					 * CFLAG_TIMEOUT)
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	/* Per-target scan: qfull recovery and command timeout handling. */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		/*
		 * TAILQ_LAST is treated as the earliest-expiring command
		 * here — presumably m_active_cmdq is kept sorted by
		 * expiration (latest at head); confirm against the
		 * insertion site before relying on this elsewhere.
		 */
		cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
		if (cmd == NULL)
			continue;

		if (cmd->cmd_active_expiration <= timestamp) {
			/*
			 * Earliest command timeout expired. Drain throttle.
			 */
			mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

			/*
			 * Check for remaining commands.
			 */
			cmd = TAILQ_FIRST(&ptgt->m_active_cmdq);
			if (cmd->cmd_active_expiration > timestamp) {
				/*
				 * Wait for remaining commands to complete or
				 * time out.
				 */
				NDBG23(("command timed out, pending drain"));
				continue;
			}

			/*
			 * All command timeouts expired.
			 */
			mptsas_log(mpt, CE_NOTE, "Timeout of %d seconds "
			    "expired with %d commands on target %d lun %d.",
			    cmd->cmd_pkt->pkt_time, ptgt->m_t_ncmds,
			    ptgt->m_devhdl, Lun(cmd));

			mptsas_cmd_timeout(mpt, ptgt);
		} else if (cmd->cmd_active_expiration <=
		    timestamp + (hrtime_t)mptsas_scsi_watchdog_tick * NANOSEC) {
			/* Timeout due within one watchdog tick: pre-drain. */
			NDBG23(("pending timeout"));
			mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
		}
	}
}
9902 9908
9903 9909 /*
9904 9910 * timeout recovery
9905 9911 */
9906 9912 static void
9907 9913 mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt)
9908 9914 {
9909 9915 uint16_t devhdl;
9910 9916 uint64_t sas_wwn;
9911 9917 uint8_t phy;
9912 9918 char wwn_str[MPTSAS_WWN_STRLEN];
9913 9919
9914 9920 devhdl = ptgt->m_devhdl;
9915 9921 sas_wwn = ptgt->m_addr.mta_wwn;
9916 9922 phy = ptgt->m_phynum;
9917 9923 if (sas_wwn == 0) {
9918 9924 (void) sprintf(wwn_str, "p%x", phy);
9919 9925 } else {
9920 9926 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
9921 9927 }
9922 9928
9923 9929 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
9924 9930 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
9925 9931 "target %d %s, enclosure %u", devhdl, wwn_str,
9926 9932 ptgt->m_enclosure);
9927 9933
9928 9934 /*
9929 9935 * Abort all outstanding commands on the device.
9930 9936 */
9931 9937 NDBG29(("mptsas_cmd_timeout: device reset"));
9932 9938 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
9933 9939 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
9934 9940 "recovery failed!", devhdl);
9935 9941 }
9936 9942 }
9937 9943
9938 9944 /*
9939 9945 * Device / Hotplug control
9940 9946 */
9941 9947 static int
9942 9948 mptsas_scsi_quiesce(dev_info_t *dip)
9943 9949 {
9944 9950 mptsas_t *mpt;
9945 9951 scsi_hba_tran_t *tran;
9946 9952
9947 9953 tran = ddi_get_driver_private(dip);
9948 9954 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9949 9955 return (-1);
9950 9956
9951 9957 return (mptsas_quiesce_bus(mpt));
9952 9958 }
9953 9959
9954 9960 static int
9955 9961 mptsas_scsi_unquiesce(dev_info_t *dip)
9956 9962 {
9957 9963 mptsas_t *mpt;
9958 9964 scsi_hba_tran_t *tran;
9959 9965
9960 9966 tran = ddi_get_driver_private(dip);
9961 9967 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
9962 9968 return (-1);
9963 9969
9964 9970 return (mptsas_unquiesce_bus(mpt));
9965 9971 }
9966 9972
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t *ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		/*
		 * Wait for the in-flight commands to drain:
		 * mptsas_ncmds_checkdrain() re-arms itself and signals
		 * m_cv once m_ncmds reaches zero.
		 */
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 * (cv_wait_sig returned 0: a signal arrived).
			 * Undo the hold and restart queued commands.
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}
			mptsas_restart_hba(mpt);
			if (mpt->m_quiesce_timeid != 0) {
				/*
				 * Cancel the pending checkdrain callback.
				 * untimeout() is called after dropping
				 * m_mutex because the callback itself takes
				 * m_mutex — calling it under the lock could
				 * deadlock.
				 */
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
10019 10025
10020 10026 static int
10021 10027 mptsas_unquiesce_bus(mptsas_t *mpt)
10022 10028 {
10023 10029 mptsas_target_t *ptgt = NULL;
10024 10030
10025 10031 NDBG28(("mptsas_unquiesce_bus"));
10026 10032 mutex_enter(&mpt->m_mutex);
10027 10033 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10028 10034 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10029 10035 ptgt = refhash_next(mpt->m_targets, ptgt)) {
10030 10036 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10031 10037 }
10032 10038 mptsas_restart_hba(mpt);
10033 10039 mutex_exit(&mpt->m_mutex);
10034 10040 return (0);
10035 10041 }
10036 10042
10037 10043 static void
10038 10044 mptsas_ncmds_checkdrain(void *arg)
10039 10045 {
10040 10046 mptsas_t *mpt = arg;
10041 10047 mptsas_target_t *ptgt = NULL;
10042 10048
10043 10049 mutex_enter(&mpt->m_mutex);
10044 10050 if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
10045 10051 mpt->m_quiesce_timeid = 0;
10046 10052 if (mpt->m_ncmds == 0) {
10047 10053 /* Command queue has been drained */
10048 10054 cv_signal(&mpt->m_cv);
10049 10055 } else {
10050 10056 /*
10051 10057 * The throttle may have been reset because
10052 10058 * of a SCSI bus reset
10053 10059 */
10054 10060 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10055 10061 ptgt = refhash_next(mpt->m_targets, ptgt)) {
10056 10062 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
10057 10063 }
10058 10064
10059 10065 mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
10060 10066 mpt, (MPTSAS_QUIESCE_TIMEOUT *
10061 10067 drv_usectohz(1000000)));
10062 10068 }
10063 10069 }
10064 10070 mutex_exit(&mpt->m_mutex);
10065 10071 }
10066 10072
10067 10073 /*ARGSUSED*/
10068 10074 static void
10069 10075 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10070 10076 {
10071 10077 int i;
10072 10078 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10073 10079 char buf[128];
10074 10080
10075 10081 buf[0] = '\0';
10076 10082 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10077 10083 Tgt(cmd), Lun(cmd)));
10078 10084 (void) sprintf(&buf[0], "\tcdb=[");
10079 10085 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10080 10086 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10081 10087 }
10082 10088 (void) sprintf(&buf[strlen(buf)], " ]");
10083 10089 NDBG25(("?%s\n", buf));
10084 10090 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10085 10091 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10086 10092 cmd->cmd_pkt->pkt_state));
10087 10093 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10088 10094 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10089 10095 }
10090 10096
/*
 * Build the MPI2 simple 64-bit SGE(s) for a passthrough request.
 * If there is a data-out buffer, a HOST_TO_IOC SGE for it is written
 * first; the final SGE always describes the data(-in/out) buffer and
 * carries the LAST_ELEMENT/END_OF_LIST flags.  All stores go through
 * ddi_put32() so the DMA access handle controls endianness.
 */
static void
mptsas_passthru_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
    pMpi2SGESimple64_t sgep)
{
	uint32_t sge_flags;
	uint32_t data_size, dataout_size;
	ddi_dma_cookie_t data_cookie;
	ddi_dma_cookie_t dataout_cookie;

	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	if (dataout_size) {
		/* First SGE: the data-out (host-to-IOC) buffer. */
		sge_flags = dataout_size |
		    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_HOST_TO_IOC |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);
		ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
		ddi_put32(acc_hdl, &sgep->Address.Low,
		    (uint32_t)(dataout_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &sgep->Address.High,
		    (uint32_t)(dataout_cookie.dmac_laddress
		    >> 32));
		sgep++;
	}
	/* Final SGE: the data buffer, terminating the list. */
	sge_flags = data_size;
	sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER |
	    MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
	    MPI2_SGE_FLAGS_SHIFT);
	if (pt->direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
		    MPI2_SGE_FLAGS_SHIFT);
	} else {
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
		    MPI2_SGE_FLAGS_SHIFT);
	}
	ddi_put32(acc_hdl, &sgep->FlagsLength,
	    sge_flags);
	ddi_put32(acc_hdl, &sgep->Address.Low,
	    (uint32_t)(data_cookie.dmac_laddress &
	    0xffffffffull));
	ddi_put32(acc_hdl, &sgep->Address.High,
	    (uint32_t)(data_cookie.dmac_laddress >> 32));
}
10143 10149
10144 10150 static void
10145 10151 mptsas_passthru_ieee_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10146 10152 pMpi2IeeeSgeSimple64_t ieeesgep)
10147 10153 {
10148 10154 uint8_t sge_flags;
10149 10155 uint32_t data_size, dataout_size;
10150 10156 ddi_dma_cookie_t data_cookie;
10151 10157 ddi_dma_cookie_t dataout_cookie;
10152 10158
10153 10159 data_size = pt->data_size;
10154 10160 dataout_size = pt->dataout_size;
10155 10161 data_cookie = pt->data_cookie;
10156 10162 dataout_cookie = pt->dataout_cookie;
10157 10163
10158 10164 sge_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
10159 10165 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
10160 10166 if (dataout_size) {
10161 10167 ddi_put32(acc_hdl, &ieeesgep->Length, dataout_size);
10162 10168 ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10163 10169 (uint32_t)(dataout_cookie.dmac_laddress &
10164 10170 0xffffffffull));
10165 10171 ddi_put32(acc_hdl, &ieeesgep->Address.High,
10166 10172 (uint32_t)(dataout_cookie.dmac_laddress >> 32));
10167 10173 ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10168 10174 ieeesgep++;
10169 10175 }
10170 10176 sge_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
10171 10177 ddi_put32(acc_hdl, &ieeesgep->Length, data_size);
10172 10178 ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10173 10179 (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
10174 10180 ddi_put32(acc_hdl, &ieeesgep->Address.High,
10175 10181 (uint32_t)(data_cookie.dmac_laddress >> 32));
10176 10182 ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10177 10183 }
10178 10184
10179 10185 static void
10180 10186 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
10181 10187 {
10182 10188 caddr_t memp;
10183 10189 pMPI2RequestHeader_t request_hdrp;
10184 10190 struct scsi_pkt *pkt = cmd->cmd_pkt;
10185 10191 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
10186 10192 uint32_t request_size;
10187 10193 uint32_t request_desc_low, request_desc_high = 0;
10188 10194 uint32_t i, sense_bufp;
10189 10195 uint8_t desc_type;
10190 10196 uint8_t *request, function;
10191 10197 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
10192 10198 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
10193 10199
10194 10200 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10195 10201
10196 10202 request = pt->request;
10197 10203 request_size = pt->request_size;
10198 10204
10199 10205 /*
10200 10206 * Store the passthrough message in memory location
10201 10207 * corresponding to our slot number
10202 10208 */
10203 10209 memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
10204 10210 request_hdrp = (pMPI2RequestHeader_t)memp;
10205 10211 bzero(memp, mpt->m_req_frame_size);
10206 10212
10207 10213 for (i = 0; i < request_size; i++) {
10208 10214 bcopy(request + i, memp + i, 1);
10209 10215 }
10210 10216
10211 10217 NDBG15(("mptsas_start_passthru: Func 0x%x, MsgFlags 0x%x, "
10212 10218 "size=%d, in %d, out %d", request_hdrp->Function,
10213 10219 request_hdrp->MsgFlags, request_size,
10214 10220 pt->data_size, pt->dataout_size));
10215 10221
10216 10222 /*
10217 10223 * Add an SGE, even if the length is zero.
10218 10224 */
10219 10225 if (mpt->m_MPI25 && pt->simple == 0) {
10220 10226 mptsas_passthru_ieee_sge(acc_hdl, pt,
10221 10227 (pMpi2IeeeSgeSimple64_t)
10222 10228 ((uint8_t *)request_hdrp + pt->sgl_offset));
10223 10229 } else {
10224 10230 mptsas_passthru_sge(acc_hdl, pt,
10225 10231 (pMpi2SGESimple64_t)
10226 10232 ((uint8_t *)request_hdrp + pt->sgl_offset));
10227 10233 }
10228 10234
10229 10235 function = request_hdrp->Function;
10230 10236 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10231 10237 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10232 10238 pMpi2SCSIIORequest_t scsi_io_req;
10233 10239
10234 10240 NDBG15(("mptsas_start_passthru: Is SCSI IO Req"));
10235 10241 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
10236 10242 /*
10237 10243 * Put SGE for data and data_out buffer at the end of
10238 10244 * scsi_io_request message header.(64 bytes in total)
10239 10245 * Following above SGEs, the residual space will be
10240 10246 * used by sense data.
10241 10247 */
10242 10248 ddi_put8(acc_hdl,
10243 10249 &scsi_io_req->SenseBufferLength,
10244 10250 (uint8_t)(request_size - 64));
10245 10251
10246 10252 sense_bufp = mpt->m_req_frame_dma_addr +
10247 10253 (mpt->m_req_frame_size * cmd->cmd_slot);
10248 10254 sense_bufp += 64;
10249 10255 ddi_put32(acc_hdl,
10250 10256 &scsi_io_req->SenseBufferLowAddress, sense_bufp);
10251 10257
10252 10258 /*
10253 10259 * Set SGLOffset0 value
10254 10260 */
10255 10261 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
10256 10262 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
10257 10263
10258 10264 /*
10259 10265 * Setup descriptor info. RAID passthrough must use the
10260 10266 * default request descriptor which is already set, so if this
10261 10267 * is a SCSI IO request, change the descriptor to SCSI IO.
10262 10268 */
10263 10269 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
10264 10270 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
10265 10271 request_desc_high = (ddi_get16(acc_hdl,
10266 10272 &scsi_io_req->DevHandle) << 16);
10267 10273 }
10268 10274 }
10269 10275
10270 10276 /*
10271 10277 * We must wait till the message has been completed before
10272 10278 * beginning the next message so we wait for this one to
10273 10279 * finish.
10274 10280 */
10275 10281 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
10276 10282 request_desc_low = (cmd->cmd_slot << 16) + desc_type;
10277 10283 cmd->cmd_rfm = NULL;
10278 10284 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
10279 10285 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
10280 10286 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
10281 10287 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10282 10288 }
10283 10289 }
10284 10290
10285 10291 typedef void (mptsas_pre_f)(mptsas_t *, mptsas_pt_request_t *);
10286 10292 static mptsas_pre_f mpi_pre_ioc_facts;
10287 10293 static mptsas_pre_f mpi_pre_port_facts;
10288 10294 static mptsas_pre_f mpi_pre_fw_download;
10289 10295 static mptsas_pre_f mpi_pre_fw_25_download;
10290 10296 static mptsas_pre_f mpi_pre_fw_upload;
10291 10297 static mptsas_pre_f mpi_pre_fw_25_upload;
10292 10298 static mptsas_pre_f mpi_pre_sata_passthrough;
10293 10299 static mptsas_pre_f mpi_pre_smp_passthrough;
10294 10300 static mptsas_pre_f mpi_pre_config;
10295 10301 static mptsas_pre_f mpi_pre_sas_io_unit_control;
10296 10302 static mptsas_pre_f mpi_pre_scsi_io_req;
10297 10303
10298 10304 /*
10299 10305 * Prepare the pt for a SAS2 FW_DOWNLOAD request.
10300 10306 */
10301 10307 static void
10302 10308 mpi_pre_fw_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10303 10309 {
10304 10310 pMpi2FWDownloadTCSGE_t tcsge;
10305 10311 pMpi2FWDownloadRequest req;
10306 10312
10307 10313 /*
10308 10314 * If SAS3, call separate function.
10309 10315 */
10310 10316 if (mpt->m_MPI25) {
10311 10317 mpi_pre_fw_25_download(mpt, pt);
10312 10318 return;
10313 10319 }
10314 10320
10315 10321 /*
10316 10322 * User requests should come in with the Transaction
10317 10323 * context element where the SGL will go. Putting the
10318 10324 * SGL after that seems to work, but don't really know
10319 10325 * why. Other drivers tend to create an extra SGL and
10320 10326 * refer to the TCE through that.
10321 10327 */
10322 10328 req = (pMpi2FWDownloadRequest)pt->request;
10323 10329 tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
10324 10330 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10325 10331 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10326 10332 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10327 10333 }
10328 10334
10329 10335 pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
10330 10336 sizeof (*tcsge);
10331 10337 if (pt->request_size != pt->sgl_offset)
10332 10338 NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
10333 10339 "0x%x, should be 0x%x, dataoutsz 0x%x",
10334 10340 (int)pt->request_size, (int)pt->sgl_offset,
10335 10341 (int)pt->dataout_size));
10336 10342 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
10337 10343 NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
10338 10344 "0x%x, should be 0x%x", pt->data_size,
10339 10345 (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10340 10346 }
10341 10347
10342 10348 /*
10343 10349 * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10344 10350 */
10345 10351 static void
10346 10352 mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10347 10353 {
10348 10354 pMpi2FWDownloadTCSGE_t tcsge;
10349 10355 pMpi2FWDownloadRequest req2;
10350 10356 pMpi25FWDownloadRequest req25;
10351 10357
10352 10358 /*
10353 10359 * User requests should come in with the Transaction
10354 10360 * context element where the SGL will go. The new firmware
10355 10361 * Doesn't use TCE and has space in the main request for
10356 10362 * this information. So move to the right place.
10357 10363 */
10358 10364 req2 = (pMpi2FWDownloadRequest)pt->request;
10359 10365 req25 = (pMpi25FWDownloadRequest)pt->request;
10360 10366 tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10361 10367 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10362 10368 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10363 10369 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10364 10370 }
10365 10371 req25->ImageOffset = tcsge->ImageOffset;
10366 10372 req25->ImageSize = tcsge->ImageSize;
10367 10373
10368 10374 pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10369 10375 if (pt->request_size != pt->sgl_offset)
10370 10376 NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10371 10377 "0x%x, should be 0x%x, dataoutsz 0x%x",
10372 10378 pt->request_size, pt->sgl_offset,
10373 10379 pt->dataout_size));
10374 10380 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
10375 10381 NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10376 10382 "0x%x, should be 0x%x", pt->data_size,
10377 10383 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10378 10384 }
10379 10385
10380 10386 /*
10381 10387 * Prepare the pt for a SAS2 FW_UPLOAD request.
10382 10388 */
10383 10389 static void
10384 10390 mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10385 10391 {
10386 10392 pMpi2FWUploadTCSGE_t tcsge;
10387 10393 pMpi2FWUploadRequest_t req;
10388 10394
10389 10395 /*
10390 10396 * If SAS3, call separate function.
10391 10397 */
10392 10398 if (mpt->m_MPI25) {
10393 10399 mpi_pre_fw_25_upload(mpt, pt);
10394 10400 return;
10395 10401 }
10396 10402
10397 10403 /*
10398 10404 * User requests should come in with the Transaction
10399 10405 * context element where the SGL will go. Putting the
10400 10406 * SGL after that seems to work, but don't really know
10401 10407 * why. Other drivers tend to create an extra SGL and
10402 10408 * refer to the TCE through that.
10403 10409 */
10404 10410 req = (pMpi2FWUploadRequest_t)pt->request;
10405 10411 tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
10406 10412 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10407 10413 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10408 10414 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10409 10415 }
10410 10416
10411 10417 pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
10412 10418 sizeof (*tcsge);
10413 10419 if (pt->request_size != pt->sgl_offset)
10414 10420 NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
10415 10421 "0x%x, should be 0x%x, dataoutsz 0x%x",
10416 10422 pt->request_size, pt->sgl_offset,
10417 10423 pt->dataout_size));
10418 10424 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
10419 10425 NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
10420 10426 "0x%x, should be 0x%x", pt->data_size,
10421 10427 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10422 10428 }
10423 10429
10424 10430 /*
10425 10431  * Prepare the pt for a SAS3 FW_UPLOAD request.
10426 10432 */
10427 10433 static void
10428 10434 mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10429 10435 {
10430 10436 pMpi2FWUploadTCSGE_t tcsge;
10431 10437 pMpi2FWUploadRequest_t req2;
10432 10438 pMpi25FWUploadRequest_t req25;
10433 10439
10434 10440 /*
10435 10441 * User requests should come in with the Transaction
10436 10442 * context element where the SGL will go. The new firmware
10437 10443 * Doesn't use TCE and has space in the main request for
10438 10444 * this information. So move to the right place.
10439 10445 */
10440 10446 req2 = (pMpi2FWUploadRequest_t)pt->request;
10441 10447 req25 = (pMpi25FWUploadRequest_t)pt->request;
10442 10448 tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
10443 10449 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10444 10450 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10445 10451 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10446 10452 }
10447 10453 req25->ImageOffset = tcsge->ImageOffset;
10448 10454 req25->ImageSize = tcsge->ImageSize;
10449 10455
10450 10456 pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
10451 10457 if (pt->request_size != pt->sgl_offset)
10452 10458 NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
10453 10459 "0x%x, should be 0x%x, dataoutsz 0x%x",
10454 10460 pt->request_size, pt->sgl_offset,
10455 10461 pt->dataout_size));
10456 10462 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
10457 10463 NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
10458 10464 "0x%x, should be 0x%x", pt->data_size,
10459 10465 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10460 10466 }
10461 10467
10462 10468 /*
10463 10469 * Prepare the pt for an IOC_FACTS request.
10464 10470 */
10465 10471 static void
10466 10472 mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10467 10473 {
10468 10474 #ifndef __lock_lint
10469 10475 _NOTE(ARGUNUSED(mpt))
10470 10476 #endif
10471 10477 if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST))
10472 10478 NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
10473 10479 "0x%x, should be 0x%x, dataoutsz 0x%x",
10474 10480 pt->request_size,
10475 10481 (int)sizeof (MPI2_IOC_FACTS_REQUEST),
10476 10482 pt->dataout_size));
10477 10483 if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY))
10478 10484 NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
10479 10485 "0x%x, should be 0x%x", pt->data_size,
10480 10486 (int)sizeof (MPI2_IOC_FACTS_REPLY)));
10481 10487 pt->sgl_offset = (uint16_t)pt->request_size;
10482 10488 }
10483 10489
10484 10490 /*
10485 10491 * Prepare the pt for a PORT_FACTS request.
10486 10492 */
10487 10493 static void
10488 10494 mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10489 10495 {
10490 10496 #ifndef __lock_lint
10491 10497 _NOTE(ARGUNUSED(mpt))
10492 10498 #endif
10493 10499 if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST))
10494 10500 NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
10495 10501 "0x%x, should be 0x%x, dataoutsz 0x%x",
10496 10502 pt->request_size,
10497 10503 (int)sizeof (MPI2_PORT_FACTS_REQUEST),
10498 10504 pt->dataout_size));
10499 10505 if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY))
10500 10506 NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
10501 10507 "0x%x, should be 0x%x", pt->data_size,
10502 10508 (int)sizeof (MPI2_PORT_FACTS_REPLY)));
10503 10509 pt->sgl_offset = (uint16_t)pt->request_size;
10504 10510 }
10505 10511
10506 10512 /*
10507 10513 * Prepare pt for a SATA_PASSTHROUGH request.
10508 10514 */
10509 10515 static void
10510 10516 mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10511 10517 {
10512 10518 #ifndef __lock_lint
10513 10519 _NOTE(ARGUNUSED(mpt))
10514 10520 #endif
10515 10521 pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
10516 10522 if (pt->request_size != pt->sgl_offset)
10517 10523 NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
10518 10524 "0x%x, should be 0x%x, dataoutsz 0x%x",
10519 10525 pt->request_size, pt->sgl_offset,
10520 10526 pt->dataout_size));
10521 10527 if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY))
10522 10528 NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
10523 10529 "0x%x, should be 0x%x", pt->data_size,
10524 10530 (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
10525 10531 }
10526 10532
10527 10533 static void
10528 10534 mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10529 10535 {
10530 10536 #ifndef __lock_lint
10531 10537 _NOTE(ARGUNUSED(mpt))
10532 10538 #endif
10533 10539 pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
10534 10540 if (pt->request_size != pt->sgl_offset)
10535 10541 NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
10536 10542 "0x%x, should be 0x%x, dataoutsz 0x%x",
10537 10543 pt->request_size, pt->sgl_offset,
10538 10544 pt->dataout_size));
10539 10545 if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY))
10540 10546 NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
10541 10547 "0x%x, should be 0x%x", pt->data_size,
10542 10548 (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
10543 10549 }
10544 10550
10545 10551 /*
10546 10552 * Prepare pt for a CONFIG request.
10547 10553 */
10548 10554 static void
10549 10555 mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
10550 10556 {
10551 10557 #ifndef __lock_lint
10552 10558 _NOTE(ARGUNUSED(mpt))
10553 10559 #endif
10554 10560 pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
10555 10561 if (pt->request_size != pt->sgl_offset)
10556 10562 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10557 10563 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10558 10564 pt->sgl_offset, pt->dataout_size));
10559 10565 if (pt->data_size != sizeof (MPI2_CONFIG_REPLY))
10560 10566 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10561 10567 "should be 0x%x", pt->data_size,
10562 10568 (int)sizeof (MPI2_CONFIG_REPLY)));
10563 10569 pt->simple = 1;
10564 10570 }
10565 10571
10566 10572 /*
10567 10573 * Prepare pt for a SCSI_IO_REQ request.
10568 10574 */
10569 10575 static void
10570 10576 mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
10571 10577 {
10572 10578 #ifndef __lock_lint
10573 10579 _NOTE(ARGUNUSED(mpt))
10574 10580 #endif
10575 10581 pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
10576 10582 if (pt->request_size != pt->sgl_offset)
10577 10583 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10578 10584 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10579 10585 pt->sgl_offset,
10580 10586 pt->dataout_size));
10581 10587 if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY))
10582 10588 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10583 10589 "should be 0x%x", pt->data_size,
10584 10590 (int)sizeof (MPI2_SCSI_IO_REPLY)));
10585 10591 }
10586 10592
10587 10593 /*
10588 10594 * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
10589 10595 */
static void
mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(mpt))
#endif
	/*
	 * The SGL, if any, goes directly after the fixed request, so the
	 * SGL offset is simply the request size supplied by the caller.
	 */
	pt->sgl_offset = (uint16_t)pt->request_size;
}
10598 10604
10599 10605 /*
10600 10606 * A set of functions to prepare an mptsas_cmd for the various
10601 10607 * supported requests.
10602 10608 */
static struct mptsas_func {
	U8 Function;		/* MPI2 function code from the request header */
	char *Name;		/* human-readable name for debug logging */
	mptsas_pre_f *f_pre;	/* prep routine that sets pt->sgl_offset */
} mptsas_func_list[] = {
	{ MPI2_FUNCTION_IOC_FACTS, "IOC_FACTS", mpi_pre_ioc_facts },
	{ MPI2_FUNCTION_PORT_FACTS, "PORT_FACTS", mpi_pre_port_facts },
	{ MPI2_FUNCTION_FW_DOWNLOAD, "FW_DOWNLOAD", mpi_pre_fw_download },
	{ MPI2_FUNCTION_FW_UPLOAD, "FW_UPLOAD", mpi_pre_fw_upload },
	{ MPI2_FUNCTION_SATA_PASSTHROUGH, "SATA_PASSTHROUGH",
	    mpi_pre_sata_passthrough },
	{ MPI2_FUNCTION_SMP_PASSTHROUGH, "SMP_PASSTHROUGH",
	    mpi_pre_smp_passthrough},
	{ MPI2_FUNCTION_SCSI_IO_REQUEST, "SCSI_IO_REQUEST",
	    mpi_pre_scsi_io_req},
	{ MPI2_FUNCTION_CONFIG, "CONFIG", mpi_pre_config},
	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, "SAS_IO_UNIT_CONTROL",
	    mpi_pre_sas_io_unit_control },
	{ 0xFF, NULL, NULL } /* list end */
};
10623 10629
10624 10630 static void
10625 10631 mptsas_prep_sgl_offset(mptsas_t *mpt, mptsas_pt_request_t *pt)
10626 10632 {
10627 10633 pMPI2RequestHeader_t hdr;
10628 10634 struct mptsas_func *f;
10629 10635
10630 10636 hdr = (pMPI2RequestHeader_t)pt->request;
10631 10637
10632 10638 for (f = mptsas_func_list; f->f_pre != NULL; f++) {
10633 10639 if (hdr->Function == f->Function) {
10634 10640 f->f_pre(mpt, pt);
10635 10641 NDBG15(("mptsas_prep_sgl_offset: Function %s,"
10636 10642 " sgl_offset 0x%x", f->Name,
10637 10643 pt->sgl_offset));
10638 10644 return;
10639 10645 }
10640 10646 }
10641 10647 NDBG15(("mptsas_prep_sgl_offset: Unknown Function 0x%02x,"
10642 10648 " returning req_size 0x%x for sgl_offset",
10643 10649 hdr->Function, pt->request_size));
10644 10650 pt->sgl_offset = (uint16_t)pt->request_size;
10645 10651 }
10646 10652
10647 10653
10648 10654 static int
10649 10655 mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
10650 10656 uint8_t *data, uint32_t request_size, uint32_t reply_size,
10651 10657 uint32_t data_size, uint32_t direction, uint8_t *dataout,
10652 10658 uint32_t dataout_size, short timeout, int mode)
10653 10659 {
10654 10660 mptsas_pt_request_t pt;
10655 10661 mptsas_dma_alloc_state_t data_dma_state;
10656 10662 mptsas_dma_alloc_state_t dataout_dma_state;
10657 10663 caddr_t memp;
10658 10664 mptsas_cmd_t *cmd = NULL;
10659 10665 struct scsi_pkt *pkt;
10660 10666 uint32_t reply_len = 0, sense_len = 0;
10661 10667 pMPI2RequestHeader_t request_hdrp;
10662 10668 pMPI2RequestHeader_t request_msg;
10663 10669 pMPI2DefaultReply_t reply_msg;
10664 10670 Mpi2SCSIIOReply_t rep_msg;
10665 10671 int i, status = 0, pt_flags = 0, rv = 0;
10666 10672 int rvalue;
10667 10673 uint8_t function;
10668 10674
10669 10675 ASSERT(mutex_owned(&mpt->m_mutex));
10670 10676
10671 10677 reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
10672 10678 bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
10673 10679 request_msg = kmem_zalloc(request_size, KM_SLEEP);
10674 10680
10675 10681 mutex_exit(&mpt->m_mutex);
10676 10682 /*
10677 10683 * copy in the request buffer since it could be used by
10678 10684 * another thread when the pt request into waitq
10679 10685 */
10680 10686 if (ddi_copyin(request, request_msg, request_size, mode)) {
10681 10687 mutex_enter(&mpt->m_mutex);
10682 10688 status = EFAULT;
10683 10689 mptsas_log(mpt, CE_WARN, "failed to copy request data");
10684 10690 goto out;
10685 10691 }
10686 10692 mutex_enter(&mpt->m_mutex);
10687 10693
10688 10694 function = request_msg->Function;
10689 10695 if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
10690 10696 pMpi2SCSITaskManagementRequest_t task;
10691 10697 task = (pMpi2SCSITaskManagementRequest_t)request_msg;
10692 10698 mptsas_setup_bus_reset_delay(mpt);
10693 10699 rv = mptsas_ioc_task_management(mpt, task->TaskType,
10694 10700 task->DevHandle, (int)task->LUN[1], reply, reply_size,
10695 10701 mode);
10696 10702
10697 10703 if (rv != TRUE) {
10698 10704 status = EIO;
10699 10705 mptsas_log(mpt, CE_WARN, "task management failed");
10700 10706 }
10701 10707 goto out;
10702 10708 }
10703 10709
10704 10710 if (data_size != 0) {
10705 10711 data_dma_state.size = data_size;
10706 10712 if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
10707 10713 status = ENOMEM;
10708 10714 mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
10709 10715 "resource");
10710 10716 goto out;
10711 10717 }
10712 10718 pt_flags |= MPTSAS_DATA_ALLOCATED;
10713 10719 if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10714 10720 mutex_exit(&mpt->m_mutex);
10715 10721 for (i = 0; i < data_size; i++) {
10716 10722 if (ddi_copyin(data + i, (uint8_t *)
10717 10723 data_dma_state.memp + i, 1, mode)) {
10718 10724 mutex_enter(&mpt->m_mutex);
10719 10725 status = EFAULT;
10720 10726 mptsas_log(mpt, CE_WARN, "failed to "
10721 10727 "copy read data");
10722 10728 goto out;
10723 10729 }
10724 10730 }
10725 10731 mutex_enter(&mpt->m_mutex);
10726 10732 }
10727 10733 } else {
10728 10734 bzero(&data_dma_state, sizeof (data_dma_state));
10729 10735 }
10730 10736
10731 10737 if (dataout_size != 0) {
10732 10738 dataout_dma_state.size = dataout_size;
10733 10739 if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
10734 10740 status = ENOMEM;
10735 10741 mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
10736 10742 "resource");
10737 10743 goto out;
10738 10744 }
10739 10745 pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
10740 10746 mutex_exit(&mpt->m_mutex);
10741 10747 for (i = 0; i < dataout_size; i++) {
10742 10748 if (ddi_copyin(dataout + i, (uint8_t *)
10743 10749 dataout_dma_state.memp + i, 1, mode)) {
10744 10750 mutex_enter(&mpt->m_mutex);
10745 10751 mptsas_log(mpt, CE_WARN, "failed to copy out"
10746 10752 " data");
10747 10753 status = EFAULT;
10748 10754 goto out;
10749 10755 }
10750 10756 }
10751 10757 mutex_enter(&mpt->m_mutex);
10752 10758 } else {
10753 10759 bzero(&dataout_dma_state, sizeof (dataout_dma_state));
10754 10760 }
10755 10761
10756 10762 if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10757 10763 status = EAGAIN;
10758 10764 mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
10759 10765 goto out;
10760 10766 }
10761 10767 pt_flags |= MPTSAS_REQUEST_POOL_CMD;
10762 10768
10763 10769 bzero((caddr_t)cmd, sizeof (*cmd));
10764 10770 bzero((caddr_t)pkt, scsi_pkt_size());
10765 10771 bzero((caddr_t)&pt, sizeof (pt));
10766 10772
10767 10773 cmd->ioc_cmd_slot = (uint32_t)(rvalue);
10768 10774
10769 10775 pt.request = (uint8_t *)request_msg;
10770 10776 pt.direction = direction;
10771 10777 pt.simple = 0;
10772 10778 pt.request_size = request_size;
10773 10779 pt.data_size = data_size;
10774 10780 pt.dataout_size = dataout_size;
10775 10781 pt.data_cookie = data_dma_state.cookie;
10776 10782 pt.dataout_cookie = dataout_dma_state.cookie;
10777 10783 mptsas_prep_sgl_offset(mpt, &pt);
10778 10784
10779 10785 /*
10780 10786 * Form a blank cmd/pkt to store the acknowledgement message
10781 10787 */
10782 10788 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
10783 10789 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
10784 10790 pkt->pkt_ha_private = (opaque_t)&pt;
10785 10791 pkt->pkt_flags = FLAG_HEAD;
10786 10792 pkt->pkt_time = timeout;
10787 10793 cmd->cmd_pkt = pkt;
10788 10794 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;
10789 10795
10790 10796 /*
10791 10797 * Save the command in a slot
10792 10798 */
10793 10799 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10794 10800 /*
10795 10801 * Once passthru command get slot, set cmd_flags
10796 10802 * CFLAG_PREPARED.
10797 10803 */
10798 10804 cmd->cmd_flags |= CFLAG_PREPARED;
10799 10805 mptsas_start_passthru(mpt, cmd);
10800 10806 } else {
10801 10807 mptsas_waitq_add(mpt, cmd);
10802 10808 }
10803 10809
10804 10810 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10805 10811 cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
10806 10812 }
10807 10813
10808 10814 if (cmd->cmd_flags & CFLAG_PREPARED) {
10809 10815 memp = mpt->m_req_frame + (mpt->m_req_frame_size *
10810 10816 cmd->cmd_slot);
10811 10817 request_hdrp = (pMPI2RequestHeader_t)memp;
10812 10818 }
10813 10819
10814 10820 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10815 10821 status = ETIMEDOUT;
10816 10822 mptsas_log(mpt, CE_WARN, "passthrough command timeout");
10817 10823 pt_flags |= MPTSAS_CMD_TIMEOUT;
10818 10824 goto out;
10819 10825 }
10820 10826
10821 10827 if (cmd->cmd_rfm) {
10822 10828 /*
10823 10829 * cmd_rfm is zero means the command reply is a CONTEXT
10824 10830 * reply and no PCI Write to post the free reply SMFA
10825 10831 * because no reply message frame is used.
10826 10832 * cmd_rfm is non-zero means the reply is a ADDRESS
10827 10833 * reply and reply message frame is used.
10828 10834 */
10829 10835 pt_flags |= MPTSAS_ADDRESS_REPLY;
10830 10836 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10831 10837 DDI_DMA_SYNC_FORCPU);
10832 10838 reply_msg = (pMPI2DefaultReply_t)
10833 10839 (mpt->m_reply_frame + (cmd->cmd_rfm -
10834 10840 mpt->m_reply_frame_dma_addr));
10835 10841 }
10836 10842
10837 10843 mptsas_fma_check(mpt, cmd);
10838 10844 if (pkt->pkt_reason == CMD_TRAN_ERR) {
10839 10845 status = EAGAIN;
10840 10846 mptsas_log(mpt, CE_WARN, "passthru fma error");
10841 10847 goto out;
10842 10848 }
10843 10849 if (pkt->pkt_reason == CMD_RESET) {
10844 10850 status = EAGAIN;
10845 10851 mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
10846 10852 goto out;
10847 10853 }
10848 10854
10849 10855 if (pkt->pkt_reason == CMD_INCOMPLETE) {
10850 10856 status = EIO;
10851 10857 mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
10852 10858 goto out;
10853 10859 }
10854 10860
10855 10861 mutex_exit(&mpt->m_mutex);
10856 10862 if (cmd->cmd_flags & CFLAG_PREPARED) {
10857 10863 function = request_hdrp->Function;
10858 10864 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10859 10865 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10860 10866 reply_len = sizeof (MPI2_SCSI_IO_REPLY);
10861 10867 sense_len = reply_size - reply_len;
10862 10868 } else {
10863 10869 reply_len = reply_size;
10864 10870 sense_len = 0;
10865 10871 }
10866 10872
10867 10873 for (i = 0; i < reply_len; i++) {
10868 10874 if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
10869 10875 mode)) {
10870 10876 mutex_enter(&mpt->m_mutex);
10871 10877 status = EFAULT;
10872 10878 mptsas_log(mpt, CE_WARN, "failed to copy out "
10873 10879 "reply data");
10874 10880 goto out;
10875 10881 }
10876 10882 }
10877 10883 for (i = 0; i < sense_len; i++) {
10878 10884 if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
10879 10885 reply + reply_len + i, 1, mode)) {
10880 10886 mutex_enter(&mpt->m_mutex);
10881 10887 status = EFAULT;
10882 10888 mptsas_log(mpt, CE_WARN, "failed to copy out "
10883 10889 "sense data");
10884 10890 goto out;
10885 10891 }
10886 10892 }
10887 10893 }
10888 10894
10889 10895 if (data_size) {
10890 10896 if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10891 10897 (void) ddi_dma_sync(data_dma_state.handle, 0, 0,
10892 10898 DDI_DMA_SYNC_FORCPU);
10893 10899 for (i = 0; i < data_size; i++) {
10894 10900 if (ddi_copyout((uint8_t *)(
10895 10901 data_dma_state.memp + i), data + i, 1,
10896 10902 mode)) {
10897 10903 mutex_enter(&mpt->m_mutex);
10898 10904 status = EFAULT;
10899 10905 mptsas_log(mpt, CE_WARN, "failed to "
10900 10906 "copy out the reply data");
10901 10907 goto out;
10902 10908 }
10903 10909 }
10904 10910 }
10905 10911 }
10906 10912 mutex_enter(&mpt->m_mutex);
10907 10913 out:
10908 10914 /*
10909 10915 * Put the reply frame back on the free queue, increment the free
10910 10916 * index, and write the new index to the free index register. But only
10911 10917 * if this reply is an ADDRESS reply.
10912 10918 */
10913 10919 if (pt_flags & MPTSAS_ADDRESS_REPLY) {
10914 10920 ddi_put32(mpt->m_acc_free_queue_hdl,
10915 10921 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
10916 10922 cmd->cmd_rfm);
10917 10923 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
10918 10924 DDI_DMA_SYNC_FORDEV);
10919 10925 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
10920 10926 mpt->m_free_index = 0;
10921 10927 }
10922 10928 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
10923 10929 mpt->m_free_index);
10924 10930 }
10925 10931 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
10926 10932 mptsas_remove_cmd(mpt, cmd);
10927 10933 pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
10928 10934 }
10929 10935 if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
10930 10936 mptsas_return_to_pool(mpt, cmd);
10931 10937 if (pt_flags & MPTSAS_DATA_ALLOCATED) {
10932 10938 if (mptsas_check_dma_handle(data_dma_state.handle) !=
10933 10939 DDI_SUCCESS) {
10934 10940 ddi_fm_service_impact(mpt->m_dip,
10935 10941 DDI_SERVICE_UNAFFECTED);
10936 10942 status = EFAULT;
10937 10943 }
10938 10944 mptsas_dma_free(&data_dma_state);
10939 10945 }
10940 10946 if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
10941 10947 if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
10942 10948 DDI_SUCCESS) {
10943 10949 ddi_fm_service_impact(mpt->m_dip,
10944 10950 DDI_SERVICE_UNAFFECTED);
10945 10951 status = EFAULT;
10946 10952 }
10947 10953 mptsas_dma_free(&dataout_dma_state);
10948 10954 }
10949 10955 if (pt_flags & MPTSAS_CMD_TIMEOUT) {
10950 10956 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10951 10957 mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
10952 10958 }
10953 10959 }
10954 10960 if (request_msg)
10955 10961 kmem_free(request_msg, request_size);
10956 10962
10957 10963 return (status);
10958 10964 }
10959 10965
10960 10966 static int
10961 10967 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10962 10968 {
10963 10969 /*
10964 10970 * If timeout is 0, set timeout to default of 60 seconds.
10965 10971 */
10966 10972 if (data->Timeout == 0) {
10967 10973 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10968 10974 }
10969 10975
10970 10976 if (((data->DataSize == 0) &&
10971 10977 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10972 10978 ((data->DataSize != 0) &&
10973 10979 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10974 10980 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10975 10981 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10976 10982 (data->DataOutSize != 0))))) {
10977 10983 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10978 10984 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10979 10985 } else {
10980 10986 data->DataOutSize = 0;
10981 10987 }
10982 10988 /*
10983 10989 * Send passthru request messages
10984 10990 */
10985 10991 return (mptsas_do_passthru(mpt,
10986 10992 (uint8_t *)((uintptr_t)data->PtrRequest),
10987 10993 (uint8_t *)((uintptr_t)data->PtrReply),
10988 10994 (uint8_t *)((uintptr_t)data->PtrData),
10989 10995 data->RequestSize, data->ReplySize,
10990 10996 data->DataSize, data->DataDirection,
10991 10997 (uint8_t *)((uintptr_t)data->PtrDataOut),
10992 10998 data->DataOutSize, data->Timeout, mode));
10993 10999 } else {
10994 11000 return (EINVAL);
10995 11001 }
10996 11002 }
10997 11003
10998 11004 static uint8_t
10999 11005 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
11000 11006 {
11001 11007 uint8_t index;
11002 11008
11003 11009 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
11004 11010 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
11005 11011 return (index);
11006 11012 }
11007 11013 }
11008 11014
11009 11015 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
11010 11016 }
11011 11017
/*
 * Build a firmware diagnostic request in the command's request frame
 * and hand it to the IOC.
 *
 * The request type is taken from the mptsas_diag_request_t hung off
 * pkt->pkt_ha_private: a DIAG_BUFFER_POST (gives the IOC a host DMA
 * buffer to log into) or a DIAG_RELEASE (asks the IOC to stop using
 * one).  The message is written through the request-frame access
 * handle, synced for the device, and started with MPTSAS_START_CMD;
 * completion arrives later through the reply queue (cmd->cmd_rfm).
 * Must be called with m_mutex held.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
	struct scsi_pkt			*pkt = cmd->cmd_pkt;
	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
	uint32_t			request_desc_low, i;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		/* Request frame for this command's slot. */
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* ProductSpecific is copied one 32-bit word at a time. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* 64-bit DMA address split into low/high halves. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/* Release request only needs function and buffer type. */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	request_desc_low = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc_low, 0);
	/* FMA check of the request frame DMA/access handles. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
11083 11089
/*
 * Post a host diagnostic buffer to the firmware and wait for the IOC's
 * acknowledgement.
 *
 * Allocates a command from the driver's pool, issues a
 * DIAG_BUFFER_POST via mptsas_start_diag(), and blocks on
 * m_fw_diag_cv until the command finishes (m_mutex is dropped while
 * waiting, per cv_wait semantics).  On a successful reply the buffer
 * is marked valid and owned by firmware.  *return_code carries the
 * MPTSAS_FW_DIAG_ERROR_* result for the ioctl caller; the function
 * itself returns DDI_SUCCESS/DDI_FAILURE.  Called with m_mutex held.
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t		diag;
	int				status, slot_num, post_flags = 0;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	pMpi2DiagBufferPostReply_t	reply;
	uint16_t			iocstatus;
	uint32_t			iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* Stack-local request descriptor, referenced via pkt_ha_private. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		/* No slot free; queue the command for later start. */
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the completion path sets CFLAG_FINISHED. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * A prepared command is removed from its slot; removal also
	 * returns it to the pool, so don't return it a second time below.
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11233 11239
/*
 * Ask the firmware to release a previously posted diagnostic buffer
 * and wait for the IOC's acknowledgement.
 *
 * Mirrors mptsas_post_fw_diag_buffer(): a pool command carries a
 * DIAG_RELEASE request through mptsas_start_diag(), and the caller's
 * thread blocks on m_fw_diag_cv until completion (m_mutex is dropped
 * while waiting).  On success the buffer's unique ID is invalidated
 * when diag_type is MPTSAS_FW_DIAG_TYPE_UNREGISTER.  *return_code
 * carries the MPTSAS_FW_DIAG_ERROR_* result for the ioctl caller.
 * Called with m_mutex held.
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t	diag;
	int			status, slot_num, rel_flags = 0;
	mptsas_cmd_t		*cmd = NULL;
	struct scsi_pkt		*pkt;
	pMpi2DiagReleaseReply_t	reply;
	uint16_t		iocstatus;
	uint32_t		iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* Stack-local request descriptor, referenced via pkt_ha_private. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		/* No slot free; queue the command for later start. */
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until the completion path sets CFLAG_FINISHED. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * A prepared command is removed from its slot; removal also
	 * returns it to the pool, so don't return it a second time below.
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11392 11398
/*
 * Register a firmware diagnostic buffer on behalf of an ioctl caller.
 *
 * Validates the buffer type and unique ID, allocates a contiguous DMA
 * buffer of the requested size, copies the caller's parameters into
 * the per-type buffer descriptor, and posts the buffer to firmware via
 * mptsas_post_fw_diag_buffer().  On any failure the DMA buffer is
 * freed again.  *return_code carries the MPTSAS_FW_DIAG_ERROR_* result
 * for the ioctl caller.  Called with m_mutex held.
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				extended_type, buffer_type, i;
	uint32_t			buffer_size;
	uint32_t			unique_id;
	int				status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	extended_type = diag_register->ExtendedType;
	buffer_type = diag_register->BufferType;
	buffer_size = diag_register->RequestedBufferSize;
	unique_id = diag_register->UniqueId;

	/*
	 * Check for valid buffer type
	 */
	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
		return (DDI_FAILURE);
	}

	/*
	 * Get the current buffer and look up the unique ID. The unique ID
	 * should not be found. If it is, the ID is already in use.
	 * Note the buffer list is indexed by type: one buffer per type.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * The buffer's unique ID should not be registered yet, and the given
	 * unique ID cannot be 0.
	 */
	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * If this buffer is already posted as immediate, just change owner.
	 */
	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		pBuffer->immediate = FALSE;
		pBuffer->unique_id = unique_id;
		return (DDI_SUCCESS);
	}

	/*
	 * Post a new buffer after checking if it's enabled. The DMA buffer
	 * that is allocated will be contiguous (sgl_len = 1).
	 */
	if (!pBuffer->enabled) {
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}
	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
	pBuffer->buffer_data.size = buffer_size;
	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
		    "diag buffer: size = %d bytes", buffer_size);
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}

	/*
	 * Copy the given info to the diag buffer and post the buffer.
	 */
	pBuffer->buffer_type = buffer_type;
	pBuffer->immediate = FALSE;
	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
		/* ProductSpecific is copied one 32-bit word at a time. */
		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
		    i++) {
			pBuffer->product_specific[i] =
			    diag_register->ProductSpecific[i];
		}
	}
	pBuffer->extended_type = extended_type;
	pBuffer->unique_id = unique_id;
	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

	/* FMA check of the new DMA buffer's handle. */
	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
	    DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
		    "mptsas_diag_register.");
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		status = DDI_FAILURE;
	}

	/*
	 * In case there was a failure, free the DMA buffer.
	 */
	if (status == DDI_FAILURE) {
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}
11499 11505
11500 11506 static int
11501 11507 mptsas_diag_unregister(mptsas_t *mpt,
11502 11508 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
11503 11509 {
11504 11510 mptsas_fw_diagnostic_buffer_t *pBuffer;
11505 11511 uint8_t i;
11506 11512 uint32_t unique_id;
11507 11513 int status;
11508 11514
11509 11515 ASSERT(mutex_owned(&mpt->m_mutex));
11510 11516
11511 11517 unique_id = diag_unregister->UniqueId;
11512 11518
11513 11519 /*
11514 11520 * Get the current buffer and look up the unique ID. The unique ID
11515 11521 * should be there.
11516 11522 */
11517 11523 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11518 11524 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11519 11525 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11520 11526 return (DDI_FAILURE);
11521 11527 }
11522 11528
11523 11529 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11524 11530
11525 11531 /*
11526 11532 * Try to release the buffer from FW before freeing it. If release
11527 11533 * fails, don't free the DMA buffer in case FW tries to access it
11528 11534 * later. If buffer is not owned by firmware, can't release it.
11529 11535 */
11530 11536 if (!pBuffer->owned_by_firmware) {
11531 11537 status = DDI_SUCCESS;
11532 11538 } else {
11533 11539 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
11534 11540 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
11535 11541 }
11536 11542
11537 11543 /*
11538 11544 * At this point, return the current status no matter what happens with
11539 11545 * the DMA buffer.
11540 11546 */
11541 11547 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
11542 11548 if (status == DDI_SUCCESS) {
11543 11549 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
11544 11550 DDI_SUCCESS) {
11545 11551 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
11546 11552 "in mptsas_diag_unregister.");
11547 11553 ddi_fm_service_impact(mpt->m_dip,
11548 11554 DDI_SERVICE_UNAFFECTED);
11549 11555 }
11550 11556 mptsas_dma_free(&pBuffer->buffer_data);
11551 11557 }
11552 11558
11553 11559 return (status);
11554 11560 }
11555 11561
11556 11562 static int
11557 11563 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
11558 11564 uint32_t *return_code)
11559 11565 {
11560 11566 mptsas_fw_diagnostic_buffer_t *pBuffer;
11561 11567 uint8_t i;
11562 11568 uint32_t unique_id;
11563 11569
11564 11570 ASSERT(mutex_owned(&mpt->m_mutex));
11565 11571
11566 11572 unique_id = diag_query->UniqueId;
11567 11573
11568 11574 /*
11569 11575 * If ID is valid, query on ID.
11570 11576 * If ID is invalid, query on buffer type.
11571 11577 */
11572 11578 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
11573 11579 i = diag_query->BufferType;
11574 11580 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
11575 11581 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11576 11582 return (DDI_FAILURE);
11577 11583 }
11578 11584 } else {
11579 11585 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11580 11586 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11581 11587 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11582 11588 return (DDI_FAILURE);
11583 11589 }
11584 11590 }
11585 11591
11586 11592 /*
11587 11593 * Fill query structure with the diag buffer info.
11588 11594 */
11589 11595 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11590 11596 diag_query->BufferType = pBuffer->buffer_type;
11591 11597 diag_query->ExtendedType = pBuffer->extended_type;
11592 11598 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
11593 11599 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
11594 11600 i++) {
11595 11601 diag_query->ProductSpecific[i] =
11596 11602 pBuffer->product_specific[i];
11597 11603 }
11598 11604 }
11599 11605 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
11600 11606 diag_query->DriverAddedBufferSize = 0;
11601 11607 diag_query->UniqueId = pBuffer->unique_id;
11602 11608 diag_query->ApplicationFlags = 0;
11603 11609 diag_query->DiagnosticFlags = 0;
11604 11610
11605 11611 /*
11606 11612 * Set/Clear application flags
11607 11613 */
11608 11614 if (pBuffer->immediate) {
11609 11615 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11610 11616 } else {
11611 11617 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11612 11618 }
11613 11619 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
11614 11620 diag_query->ApplicationFlags |=
11615 11621 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11616 11622 } else {
11617 11623 diag_query->ApplicationFlags &=
11618 11624 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11619 11625 }
11620 11626 if (pBuffer->owned_by_firmware) {
11621 11627 diag_query->ApplicationFlags |=
11622 11628 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11623 11629 } else {
11624 11630 diag_query->ApplicationFlags &=
11625 11631 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11626 11632 }
11627 11633
11628 11634 return (DDI_SUCCESS);
11629 11635 }
11630 11636
11631 11637 static int
11632 11638 mptsas_diag_read_buffer(mptsas_t *mpt,
11633 11639 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
11634 11640 uint32_t *return_code, int ioctl_mode)
11635 11641 {
11636 11642 mptsas_fw_diagnostic_buffer_t *pBuffer;
11637 11643 uint8_t i, *pData;
11638 11644 uint32_t unique_id, byte;
11639 11645 int status;
11640 11646
11641 11647 ASSERT(mutex_owned(&mpt->m_mutex));
11642 11648
11643 11649 unique_id = diag_read_buffer->UniqueId;
11644 11650
11645 11651 /*
11646 11652 * Get the current buffer and look up the unique ID. The unique ID
11647 11653 * should be there.
11648 11654 */
11649 11655 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11650 11656 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11651 11657 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11652 11658 return (DDI_FAILURE);
11653 11659 }
11654 11660
11655 11661 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11656 11662
11657 11663 /*
11658 11664 * Make sure requested read is within limits
11659 11665 */
11660 11666 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
11661 11667 pBuffer->buffer_data.size) {
11662 11668 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11663 11669 return (DDI_FAILURE);
11664 11670 }
11665 11671
11666 11672 /*
11667 11673 * Copy the requested data from DMA to the diag_read_buffer. The DMA
11668 11674 * buffer that was allocated is one contiguous buffer.
11669 11675 */
11670 11676 pData = (uint8_t *)(pBuffer->buffer_data.memp +
11671 11677 diag_read_buffer->StartingOffset);
11672 11678 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
11673 11679 DDI_DMA_SYNC_FORCPU);
11674 11680 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
11675 11681 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
11676 11682 != 0) {
11677 11683 return (DDI_FAILURE);
11678 11684 }
11679 11685 }
11680 11686 diag_read_buffer->Status = 0;
11681 11687
11682 11688 /*
11683 11689 * Set or clear the Force Release flag.
11684 11690 */
11685 11691 if (pBuffer->force_release) {
11686 11692 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11687 11693 } else {
11688 11694 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11689 11695 }
11690 11696
11691 11697 /*
11692 11698 * If buffer is to be reregistered, make sure it's not already owned by
11693 11699 * firmware first.
11694 11700 */
11695 11701 status = DDI_SUCCESS;
11696 11702 if (!pBuffer->owned_by_firmware) {
11697 11703 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
11698 11704 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
11699 11705 return_code);
11700 11706 }
11701 11707 }
11702 11708
11703 11709 return (status);
11704 11710 }
11705 11711
11706 11712 static int
11707 11713 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
11708 11714 uint32_t *return_code)
11709 11715 {
11710 11716 mptsas_fw_diagnostic_buffer_t *pBuffer;
11711 11717 uint8_t i;
11712 11718 uint32_t unique_id;
11713 11719 int status;
11714 11720
11715 11721 ASSERT(mutex_owned(&mpt->m_mutex));
11716 11722
11717 11723 unique_id = diag_release->UniqueId;
11718 11724
11719 11725 /*
11720 11726 * Get the current buffer and look up the unique ID. The unique ID
11721 11727 * should be there.
11722 11728 */
11723 11729 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11724 11730 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11725 11731 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11726 11732 return (DDI_FAILURE);
11727 11733 }
11728 11734
11729 11735 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11730 11736
11731 11737 /*
11732 11738 * If buffer is not owned by firmware, it's already been released.
11733 11739 */
11734 11740 if (!pBuffer->owned_by_firmware) {
11735 11741 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
11736 11742 return (DDI_FAILURE);
11737 11743 }
11738 11744
11739 11745 /*
11740 11746 * Release the buffer.
11741 11747 */
11742 11748 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
11743 11749 MPTSAS_FW_DIAG_TYPE_RELEASE);
11744 11750 return (status);
11745 11751 }
11746 11752
11747 11753 static int
11748 11754 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
11749 11755 uint32_t length, uint32_t *return_code, int ioctl_mode)
11750 11756 {
11751 11757 mptsas_fw_diag_register_t diag_register;
11752 11758 mptsas_fw_diag_unregister_t diag_unregister;
11753 11759 mptsas_fw_diag_query_t diag_query;
11754 11760 mptsas_diag_read_buffer_t diag_read_buffer;
11755 11761 mptsas_fw_diag_release_t diag_release;
11756 11762 int status = DDI_SUCCESS;
11757 11763 uint32_t original_return_code, read_buf_len;
11758 11764
11759 11765 ASSERT(mutex_owned(&mpt->m_mutex));
11760 11766
11761 11767 original_return_code = *return_code;
11762 11768 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11763 11769
11764 11770 switch (action) {
11765 11771 case MPTSAS_FW_DIAG_TYPE_REGISTER:
11766 11772 if (!length) {
11767 11773 *return_code =
11768 11774 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11769 11775 status = DDI_FAILURE;
11770 11776 break;
11771 11777 }
11772 11778 if (ddi_copyin(diag_action, &diag_register,
11773 11779 sizeof (diag_register), ioctl_mode) != 0) {
11774 11780 return (DDI_FAILURE);
11775 11781 }
11776 11782 status = mptsas_diag_register(mpt, &diag_register,
11777 11783 return_code);
11778 11784 break;
11779 11785
11780 11786 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
11781 11787 if (length < sizeof (diag_unregister)) {
11782 11788 *return_code =
11783 11789 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11784 11790 status = DDI_FAILURE;
11785 11791 break;
11786 11792 }
11787 11793 if (ddi_copyin(diag_action, &diag_unregister,
11788 11794 sizeof (diag_unregister), ioctl_mode) != 0) {
11789 11795 return (DDI_FAILURE);
11790 11796 }
11791 11797 status = mptsas_diag_unregister(mpt, &diag_unregister,
11792 11798 return_code);
11793 11799 break;
11794 11800
11795 11801 case MPTSAS_FW_DIAG_TYPE_QUERY:
11796 11802 if (length < sizeof (diag_query)) {
11797 11803 *return_code =
11798 11804 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11799 11805 status = DDI_FAILURE;
11800 11806 break;
11801 11807 }
11802 11808 if (ddi_copyin(diag_action, &diag_query,
11803 11809 sizeof (diag_query), ioctl_mode) != 0) {
11804 11810 return (DDI_FAILURE);
11805 11811 }
11806 11812 status = mptsas_diag_query(mpt, &diag_query,
11807 11813 return_code);
11808 11814 if (status == DDI_SUCCESS) {
11809 11815 if (ddi_copyout(&diag_query, diag_action,
11810 11816 sizeof (diag_query), ioctl_mode) != 0) {
11811 11817 return (DDI_FAILURE);
11812 11818 }
11813 11819 }
11814 11820 break;
11815 11821
11816 11822 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
11817 11823 if (ddi_copyin(diag_action, &diag_read_buffer,
11818 11824 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
11819 11825 return (DDI_FAILURE);
11820 11826 }
11821 11827 read_buf_len = sizeof (diag_read_buffer) -
11822 11828 sizeof (diag_read_buffer.DataBuffer) +
11823 11829 diag_read_buffer.BytesToRead;
11824 11830 if (length < read_buf_len) {
11825 11831 *return_code =
11826 11832 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11827 11833 status = DDI_FAILURE;
11828 11834 break;
11829 11835 }
11830 11836 status = mptsas_diag_read_buffer(mpt,
11831 11837 &diag_read_buffer, diag_action +
11832 11838 sizeof (diag_read_buffer) - 4, return_code,
11833 11839 ioctl_mode);
11834 11840 if (status == DDI_SUCCESS) {
11835 11841 if (ddi_copyout(&diag_read_buffer, diag_action,
11836 11842 sizeof (diag_read_buffer) - 4, ioctl_mode)
11837 11843 != 0) {
11838 11844 return (DDI_FAILURE);
11839 11845 }
11840 11846 }
11841 11847 break;
11842 11848
11843 11849 case MPTSAS_FW_DIAG_TYPE_RELEASE:
11844 11850 if (length < sizeof (diag_release)) {
11845 11851 *return_code =
11846 11852 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11847 11853 status = DDI_FAILURE;
11848 11854 break;
11849 11855 }
11850 11856 if (ddi_copyin(diag_action, &diag_release,
11851 11857 sizeof (diag_release), ioctl_mode) != 0) {
11852 11858 return (DDI_FAILURE);
11853 11859 }
11854 11860 status = mptsas_diag_release(mpt, &diag_release,
11855 11861 return_code);
11856 11862 break;
11857 11863
11858 11864 default:
11859 11865 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11860 11866 status = DDI_FAILURE;
11861 11867 break;
11862 11868 }
11863 11869
11864 11870 if ((status == DDI_FAILURE) &&
11865 11871 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
11866 11872 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
11867 11873 status = DDI_SUCCESS;
11868 11874 }
11869 11875
11870 11876 return (status);
11871 11877 }
11872 11878
11873 11879 static int
11874 11880 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
11875 11881 {
11876 11882 int status;
11877 11883 mptsas_diag_action_t driver_data;
11878 11884
11879 11885 ASSERT(mutex_owned(&mpt->m_mutex));
11880 11886
11881 11887 /*
11882 11888 * Copy the user data to a driver data buffer.
11883 11889 */
11884 11890 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
11885 11891 mode) == 0) {
11886 11892 /*
11887 11893 * Send diag action request if Action is valid
11888 11894 */
11889 11895 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
11890 11896 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
11891 11897 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
11892 11898 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
11893 11899 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
11894 11900 status = mptsas_do_diag_action(mpt, driver_data.Action,
11895 11901 (void *)(uintptr_t)driver_data.PtrDiagAction,
11896 11902 driver_data.Length, &driver_data.ReturnCode,
11897 11903 mode);
11898 11904 if (status == DDI_SUCCESS) {
11899 11905 if (ddi_copyout(&driver_data.ReturnCode,
11900 11906 &user_data->ReturnCode,
11901 11907 sizeof (user_data->ReturnCode), mode)
11902 11908 != 0) {
11903 11909 status = EFAULT;
11904 11910 } else {
11905 11911 status = 0;
11906 11912 }
11907 11913 } else {
11908 11914 status = EIO;
11909 11915 }
11910 11916 } else {
11911 11917 status = EINVAL;
11912 11918 }
11913 11919 } else {
11914 11920 status = EFAULT;
11915 11921 }
11916 11922
11917 11923 return (status);
11918 11924 }
11919 11925
11920 11926 /*
11921 11927 * This routine handles the "event query" ioctl.
11922 11928 */
11923 11929 static int
11924 11930 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11925 11931 int *rval)
11926 11932 {
11927 11933 int status;
11928 11934 mptsas_event_query_t driverdata;
11929 11935 uint8_t i;
11930 11936
11931 11937 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11932 11938
11933 11939 mutex_enter(&mpt->m_mutex);
11934 11940 for (i = 0; i < 4; i++) {
11935 11941 driverdata.Types[i] = mpt->m_event_mask[i];
11936 11942 }
11937 11943 mutex_exit(&mpt->m_mutex);
11938 11944
11939 11945 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11940 11946 status = EFAULT;
11941 11947 } else {
11942 11948 *rval = MPTIOCTL_STATUS_GOOD;
11943 11949 status = 0;
11944 11950 }
11945 11951
11946 11952 return (status);
11947 11953 }
11948 11954
11949 11955 /*
11950 11956 * This routine handles the "event enable" ioctl.
11951 11957 */
11952 11958 static int
11953 11959 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11954 11960 int *rval)
11955 11961 {
11956 11962 int status;
11957 11963 mptsas_event_enable_t driverdata;
11958 11964 uint8_t i;
11959 11965
11960 11966 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11961 11967 mutex_enter(&mpt->m_mutex);
11962 11968 for (i = 0; i < 4; i++) {
11963 11969 mpt->m_event_mask[i] = driverdata.Types[i];
11964 11970 }
11965 11971 mutex_exit(&mpt->m_mutex);
11966 11972
11967 11973 *rval = MPTIOCTL_STATUS_GOOD;
11968 11974 status = 0;
11969 11975 } else {
11970 11976 status = EFAULT;
11971 11977 }
11972 11978 return (status);
11973 11979 }
11974 11980
11975 11981 /*
11976 11982 * This routine handles the "event report" ioctl.
11977 11983 */
11978 11984 static int
11979 11985 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
11980 11986 int *rval)
11981 11987 {
11982 11988 int status;
11983 11989 mptsas_event_report_t driverdata;
11984 11990
11985 11991 mutex_enter(&mpt->m_mutex);
11986 11992
11987 11993 if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
11988 11994 mode) == 0) {
11989 11995 if (driverdata.Size >= sizeof (mpt->m_events)) {
11990 11996 if (ddi_copyout(mpt->m_events, data->Events,
11991 11997 sizeof (mpt->m_events), mode) != 0) {
11992 11998 status = EFAULT;
11993 11999 } else {
11994 12000 if (driverdata.Size > sizeof (mpt->m_events)) {
11995 12001 driverdata.Size =
11996 12002 sizeof (mpt->m_events);
11997 12003 if (ddi_copyout(&driverdata.Size,
11998 12004 &data->Size,
11999 12005 sizeof (driverdata.Size),
12000 12006 mode) != 0) {
12001 12007 status = EFAULT;
12002 12008 } else {
12003 12009 *rval = MPTIOCTL_STATUS_GOOD;
12004 12010 status = 0;
12005 12011 }
12006 12012 } else {
12007 12013 *rval = MPTIOCTL_STATUS_GOOD;
12008 12014 status = 0;
12009 12015 }
12010 12016 }
12011 12017 } else {
12012 12018 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
12013 12019 status = 0;
12014 12020 }
12015 12021 } else {
12016 12022 status = EFAULT;
12017 12023 }
12018 12024
12019 12025 mutex_exit(&mpt->m_mutex);
12020 12026 return (status);
12021 12027 }
12022 12028
12023 12029 static void
12024 12030 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12025 12031 {
12026 12032 int *reg_data;
12027 12033 uint_t reglen;
12028 12034
12029 12035 /*
12030 12036 * Lookup the 'reg' property and extract the other data
12031 12037 */
12032 12038 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12033 12039 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
12034 12040 DDI_PROP_SUCCESS) {
12035 12041 /*
12036 12042 * Extract the PCI data from the 'reg' property first DWORD.
12037 12043 * The entry looks like the following:
12038 12044 * First DWORD:
12039 12045 * Bits 0 - 7 8-bit Register number
12040 12046 * Bits 8 - 10 3-bit Function number
12041 12047 * Bits 11 - 15 5-bit Device number
12042 12048 * Bits 16 - 23 8-bit Bus number
12043 12049 * Bits 24 - 25 2-bit Address Space type identifier
12044 12050 *
12045 12051 */
12046 12052 adapter_data->PciInformation.u.bits.BusNumber =
12047 12053 (reg_data[0] & 0x00FF0000) >> 16;
12048 12054 adapter_data->PciInformation.u.bits.DeviceNumber =
12049 12055 (reg_data[0] & 0x0000F800) >> 11;
12050 12056 adapter_data->PciInformation.u.bits.FunctionNumber =
12051 12057 (reg_data[0] & 0x00000700) >> 8;
12052 12058 ddi_prop_free((void *)reg_data);
12053 12059 } else {
12054 12060 /*
12055 12061 * If we can't determine the PCI data then we fill in FF's for
12056 12062 * the data to indicate this.
12057 12063 */
12058 12064 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
12059 12065 adapter_data->MpiPortNumber = 0xFFFFFFFF;
12060 12066 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
12061 12067 }
12062 12068
12063 12069 /*
12064 12070 * Saved in the mpt->m_fwversion
12065 12071 */
12066 12072 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
12067 12073 }
12068 12074
12069 12075 static void
12070 12076 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12071 12077 {
12072 12078 char *driver_verstr = MPTSAS_MOD_STRING;
12073 12079
12074 12080 mptsas_lookup_pci_data(mpt, adapter_data);
12075 12081 adapter_data->AdapterType = mpt->m_MPI25 ?
12076 12082 MPTIOCTL_ADAPTER_TYPE_SAS3 :
12077 12083 MPTIOCTL_ADAPTER_TYPE_SAS2;
12078 12084 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
12079 12085 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
12080 12086 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
12081 12087 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
12082 12088 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
12083 12089 adapter_data->BiosVersion = 0;
12084 12090 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
12085 12091 }
12086 12092
12087 12093 static void
12088 12094 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
12089 12095 {
12090 12096 int *reg_data, i;
12091 12097 uint_t reglen;
12092 12098
12093 12099 /*
12094 12100 * Lookup the 'reg' property and extract the other data
12095 12101 */
12096 12102 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12097 12103 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
12098 12104 DDI_PROP_SUCCESS) {
12099 12105 /*
12100 12106 * Extract the PCI data from the 'reg' property first DWORD.
12101 12107 * The entry looks like the following:
12102 12108 * First DWORD:
12103 12109 * Bits 8 - 10 3-bit Function number
12104 12110 * Bits 11 - 15 5-bit Device number
12105 12111 * Bits 16 - 23 8-bit Bus number
12106 12112 */
12107 12113 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
12108 12114 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
12109 12115 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
12110 12116 ddi_prop_free((void *)reg_data);
12111 12117 } else {
12112 12118 /*
12113 12119 * If we can't determine the PCI info then we fill in FF's for
12114 12120 * the data to indicate this.
12115 12121 */
12116 12122 pci_info->BusNumber = 0xFFFFFFFF;
12117 12123 pci_info->DeviceNumber = 0xFF;
12118 12124 pci_info->FunctionNumber = 0xFF;
12119 12125 }
12120 12126
12121 12127 /*
12122 12128 * Now get the interrupt vector and the pci header. The vector can
12123 12129 * only be 0 right now. The header is the first 256 bytes of config
12124 12130 * space.
12125 12131 */
12126 12132 pci_info->InterruptVector = 0;
12127 12133 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
12128 12134 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
12129 12135 i);
12130 12136 }
12131 12137 }
12132 12138
12133 12139 static int
12134 12140 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
12135 12141 {
12136 12142 int status = 0;
12137 12143 mptsas_reg_access_t driverdata;
12138 12144
12139 12145 mutex_enter(&mpt->m_mutex);
12140 12146 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12141 12147 switch (driverdata.Command) {
12142 12148 /*
12143 12149 * IO access is not supported.
12144 12150 */
12145 12151 case REG_IO_READ:
12146 12152 case REG_IO_WRITE:
12147 12153 mptsas_log(mpt, CE_WARN, "IO access is not "
12148 12154 "supported. Use memory access.");
12149 12155 status = EINVAL;
12150 12156 break;
12151 12157
12152 12158 case REG_MEM_READ:
12153 12159 driverdata.RegData = ddi_get32(mpt->m_datap,
12154 12160 (uint32_t *)(void *)mpt->m_reg +
12155 12161 driverdata.RegOffset);
12156 12162 if (ddi_copyout(&driverdata.RegData,
12157 12163 &data->RegData,
12158 12164 sizeof (driverdata.RegData), mode) != 0) {
12159 12165 mptsas_log(mpt, CE_WARN, "Register "
12160 12166 "Read Failed");
12161 12167 status = EFAULT;
12162 12168 }
12163 12169 break;
12164 12170
12165 12171 case REG_MEM_WRITE:
12166 12172 ddi_put32(mpt->m_datap,
12167 12173 (uint32_t *)(void *)mpt->m_reg +
12168 12174 driverdata.RegOffset,
12169 12175 driverdata.RegData);
12170 12176 break;
12171 12177
12172 12178 default:
12173 12179 status = EINVAL;
12174 12180 break;
12175 12181 }
12176 12182 } else {
12177 12183 status = EFAULT;
12178 12184 }
12179 12185
12180 12186 mutex_exit(&mpt->m_mutex);
12181 12187 return (status);
12182 12188 }
12183 12189
12184 12190 static int
12185 12191 led_control(mptsas_t *mpt, intptr_t data, int mode)
12186 12192 {
12187 12193 int ret = 0;
12188 12194 mptsas_led_control_t lc;
12189 12195 mptsas_target_t *ptgt;
12190 12196
12191 12197 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
12192 12198 return (EFAULT);
12193 12199 }
12194 12200
12195 12201 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
12196 12202 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
12197 12203 lc.Led < MPTSAS_LEDCTL_LED_MIN ||
12198 12204 lc.Led > MPTSAS_LEDCTL_LED_MAX ||
12199 12205 (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
12200 12206 lc.LedStatus != 1)) {
12201 12207 return (EINVAL);
12202 12208 }
12203 12209
12204 12210 if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
12205 12211 (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
12206 12212 return (EACCES);
12207 12213
12208 12214 /* Locate the target we're interrogating... */
12209 12215 mutex_enter(&mpt->m_mutex);
12210 12216 ptgt = refhash_linear_search(mpt->m_targets,
12211 12217 mptsas_target_eval_slot, &lc);
12212 12218 if (ptgt == NULL) {
12213 12219 /* We could not find a target for that enclosure/slot. */
12214 12220 mutex_exit(&mpt->m_mutex);
12215 12221 return (ENOENT);
12216 12222 }
12217 12223
12218 12224 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
12219 12225 /* Update our internal LED state. */
12220 12226 ptgt->m_led_status &= ~(1 << (lc.Led - 1));
12221 12227 ptgt->m_led_status |= lc.LedStatus << (lc.Led - 1);
12222 12228
12223 12229 /* Flush it to the controller. */
12224 12230 ret = mptsas_flush_led_status(mpt, ptgt);
12225 12231 mutex_exit(&mpt->m_mutex);
12226 12232 return (ret);
12227 12233 }
12228 12234
12229 12235 /* Return our internal LED state. */
12230 12236 lc.LedStatus = (ptgt->m_led_status >> (lc.Led - 1)) & 1;
12231 12237 mutex_exit(&mpt->m_mutex);
12232 12238
12233 12239 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12234 12240 return (EFAULT);
12235 12241 }
12236 12242
12237 12243 return (0);
12238 12244 }
12239 12245
12240 12246 static int
12241 12247 get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
12242 12248 {
12243 12249 uint16_t i = 0;
12244 12250 uint16_t count = 0;
12245 12251 int ret = 0;
12246 12252 mptsas_target_t *ptgt;
12247 12253 mptsas_disk_info_t *di;
12248 12254 STRUCT_DECL(mptsas_get_disk_info, gdi);
12249 12255
12250 12256 if ((mode & FREAD) == 0)
12251 12257 return (EACCES);
12252 12258
12253 12259 STRUCT_INIT(gdi, get_udatamodel());
12254 12260
12255 12261 if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
12256 12262 mode) != 0) {
12257 12263 return (EFAULT);
12258 12264 }
12259 12265
12260 12266 /* Find out how many targets there are. */
12261 12267 mutex_enter(&mpt->m_mutex);
12262 12268 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
12263 12269 ptgt = refhash_next(mpt->m_targets, ptgt)) {
12264 12270 count++;
12265 12271 }
12266 12272 mutex_exit(&mpt->m_mutex);
12267 12273
12268 12274 /*
12269 12275 * If we haven't been asked to copy out information on each target,
12270 12276 * then just return the count.
12271 12277 */
12272 12278 STRUCT_FSET(gdi, DiskCount, count);
12273 12279 if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
12274 12280 goto copy_out;
12275 12281
12276 12282 /*
12277 12283 * If we haven't been given a large enough buffer to copy out into,
12278 12284 * let the caller know.
12279 12285 */
12280 12286 if (STRUCT_FGET(gdi, DiskInfoArraySize) <
12281 12287 count * sizeof (mptsas_disk_info_t)) {
12282 12288 ret = ENOSPC;
12283 12289 goto copy_out;
12284 12290 }
12285 12291
12286 12292 di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);
12287 12293
12288 12294 mutex_enter(&mpt->m_mutex);
12289 12295 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
12290 12296 ptgt = refhash_next(mpt->m_targets, ptgt)) {
12291 12297 if (i >= count) {
12292 12298 /*
12293 12299 * The number of targets changed while we weren't
12294 12300 * looking, so give up.
12295 12301 */
12296 12302 refhash_rele(mpt->m_targets, ptgt);
12297 12303 mutex_exit(&mpt->m_mutex);
12298 12304 kmem_free(di, count * sizeof (mptsas_disk_info_t));
12299 12305 return (EAGAIN);
12300 12306 }
12301 12307 di[i].Instance = mpt->m_instance;
12302 12308 di[i].Enclosure = ptgt->m_enclosure;
12303 12309 di[i].Slot = ptgt->m_slot_num;
12304 12310 di[i].SasAddress = ptgt->m_addr.mta_wwn;
12305 12311 i++;
12306 12312 }
12307 12313 mutex_exit(&mpt->m_mutex);
12308 12314 STRUCT_FSET(gdi, DiskCount, i);
12309 12315
12310 12316 /* Copy out the disk information to the caller. */
12311 12317 if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
12312 12318 i * sizeof (mptsas_disk_info_t), mode) != 0) {
12313 12319 ret = EFAULT;
12314 12320 }
12315 12321
12316 12322 kmem_free(di, count * sizeof (mptsas_disk_info_t));
12317 12323
12318 12324 copy_out:
12319 12325 if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
12320 12326 mode) != 0) {
12321 12327 ret = EFAULT;
12322 12328 }
12323 12329
12324 12330 return (ret);
12325 12331 }
12326 12332
12327 12333 static int
12328 12334 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
12329 12335 int *rval)
12330 12336 {
12331 12337 int status = 0;
12332 12338 mptsas_t *mpt;
12333 12339 mptsas_update_flash_t flashdata;
12334 12340 mptsas_pass_thru_t passthru_data;
12335 12341 mptsas_adapter_data_t adapter_data;
12336 12342 mptsas_pci_info_t pci_info;
12337 12343 int copylen;
12338 12344
12339 12345 int iport_flag = 0;
12340 12346 dev_info_t *dip = NULL;
12341 12347 mptsas_phymask_t phymask = 0;
12342 12348 struct devctl_iocdata *dcp = NULL;
12343 12349 char *addr = NULL;
12344 12350 mptsas_target_t *ptgt = NULL;
12345 12351
12346 12352 *rval = MPTIOCTL_STATUS_GOOD;
12347 12353 if (secpolicy_sys_config(credp, B_FALSE) != 0) {
12348 12354 return (EPERM);
12349 12355 }
12350 12356
12351 12357 mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
12352 12358 if (mpt == NULL) {
12353 12359 /*
12354 12360 * Called from iport node, get the states
12355 12361 */
12356 12362 iport_flag = 1;
12357 12363 dip = mptsas_get_dip_from_dev(dev, &phymask);
12358 12364 if (dip == NULL) {
12359 12365 return (ENXIO);
12360 12366 }
12361 12367 mpt = DIP2MPT(dip);
12362 12368 }
12363 12369 /* Make sure power level is D0 before accessing registers */
12364 12370 mutex_enter(&mpt->m_mutex);
12365 12371 if (mpt->m_options & MPTSAS_OPT_PM) {
12366 12372 (void) pm_busy_component(mpt->m_dip, 0);
12367 12373 if (mpt->m_power_level != PM_LEVEL_D0) {
12368 12374 mutex_exit(&mpt->m_mutex);
12369 12375 if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
12370 12376 DDI_SUCCESS) {
12371 12377 mptsas_log(mpt, CE_WARN,
12372 12378 "mptsas%d: mptsas_ioctl: Raise power "
12373 12379 "request failed.", mpt->m_instance);
12374 12380 (void) pm_idle_component(mpt->m_dip, 0);
12375 12381 return (ENXIO);
12376 12382 }
12377 12383 } else {
12378 12384 mutex_exit(&mpt->m_mutex);
12379 12385 }
12380 12386 } else {
12381 12387 mutex_exit(&mpt->m_mutex);
12382 12388 }
12383 12389
12384 12390 if (iport_flag) {
12385 12391 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
12386 12392 if (status != 0) {
12387 12393 goto out;
12388 12394 }
12389 12395 /*
12390 12396 * The following code control the OK2RM LED, it doesn't affect
12391 12397 * the ioctl return status.
12392 12398 */
12393 12399 if ((cmd == DEVCTL_DEVICE_ONLINE) ||
12394 12400 (cmd == DEVCTL_DEVICE_OFFLINE)) {
12395 12401 if (ndi_dc_allochdl((void *)data, &dcp) !=
12396 12402 NDI_SUCCESS) {
12397 12403 goto out;
12398 12404 }
12399 12405 addr = ndi_dc_getaddr(dcp);
12400 12406 ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
12401 12407 if (ptgt == NULL) {
12402 12408 NDBG14(("mptsas_ioctl led control: tgt %s not "
12403 12409 "found", addr));
12404 12410 ndi_dc_freehdl(dcp);
12405 12411 goto out;
12406 12412 }
12407 12413 mutex_enter(&mpt->m_mutex);
12408 12414 if (cmd == DEVCTL_DEVICE_ONLINE) {
12409 12415 ptgt->m_tgt_unconfigured = 0;
12410 12416 } else if (cmd == DEVCTL_DEVICE_OFFLINE) {
12411 12417 ptgt->m_tgt_unconfigured = 1;
12412 12418 }
12413 12419 if (cmd == DEVCTL_DEVICE_OFFLINE) {
12414 12420 ptgt->m_led_status |=
12415 12421 (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
12416 12422 } else {
12417 12423 ptgt->m_led_status &=
12418 12424 ~(1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
12419 12425 }
12420 12426 (void) mptsas_flush_led_status(mpt, ptgt);
12421 12427 mutex_exit(&mpt->m_mutex);
12422 12428 ndi_dc_freehdl(dcp);
12423 12429 }
12424 12430 goto out;
12425 12431 }
12426 12432 switch (cmd) {
12427 12433 case MPTIOCTL_GET_DISK_INFO:
12428 12434 status = get_disk_info(mpt, data, mode);
12429 12435 break;
12430 12436 case MPTIOCTL_LED_CONTROL:
12431 12437 status = led_control(mpt, data, mode);
12432 12438 break;
12433 12439 case MPTIOCTL_UPDATE_FLASH:
12434 12440 if (ddi_copyin((void *)data, &flashdata,
12435 12441 sizeof (struct mptsas_update_flash), mode)) {
12436 12442 status = EFAULT;
12437 12443 break;
12438 12444 }
12439 12445
12440 12446 mutex_enter(&mpt->m_mutex);
12441 12447 if (mptsas_update_flash(mpt,
12442 12448 (caddr_t)(long)flashdata.PtrBuffer,
12443 12449 flashdata.ImageSize, flashdata.ImageType, mode)) {
12444 12450 status = EFAULT;
12445 12451 }
12446 12452
12447 12453 /*
12448 12454 * Reset the chip to start using the new
12449 12455 * firmware. Reset if failed also.
12450 12456 */
12451 12457 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12452 12458 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
12453 12459 status = EFAULT;
12454 12460 }
12455 12461 mutex_exit(&mpt->m_mutex);
12456 12462 break;
12457 12463 case MPTIOCTL_PASS_THRU:
12458 12464 /*
12459 12465 * The user has requested to pass through a command to
12460 12466 * be executed by the MPT firmware. Call our routine
12461 12467 * which does this. Only allow one passthru IOCTL at
12462 12468 * one time. Other threads will block on
12463 12469 * m_passthru_mutex, which is of adaptive variant.
12464 12470 */
12465 12471 if (ddi_copyin((void *)data, &passthru_data,
12466 12472 sizeof (mptsas_pass_thru_t), mode)) {
12467 12473 status = EFAULT;
12468 12474 break;
12469 12475 }
12470 12476 mutex_enter(&mpt->m_passthru_mutex);
12471 12477 mutex_enter(&mpt->m_mutex);
12472 12478 status = mptsas_pass_thru(mpt, &passthru_data, mode);
12473 12479 mutex_exit(&mpt->m_mutex);
12474 12480 mutex_exit(&mpt->m_passthru_mutex);
12475 12481
12476 12482 break;
12477 12483 case MPTIOCTL_GET_ADAPTER_DATA:
12478 12484 /*
12479 12485 * The user has requested to read adapter data. Call
12480 12486 * our routine which does this.
12481 12487 */
12482 12488 bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
12483 12489 if (ddi_copyin((void *)data, (void *)&adapter_data,
12484 12490 sizeof (mptsas_adapter_data_t), mode)) {
12485 12491 status = EFAULT;
12486 12492 break;
12487 12493 }
12488 12494 if (adapter_data.StructureLength >=
12489 12495 sizeof (mptsas_adapter_data_t)) {
12490 12496 adapter_data.StructureLength = (uint32_t)
12491 12497 sizeof (mptsas_adapter_data_t);
12492 12498 copylen = sizeof (mptsas_adapter_data_t);
12493 12499 mutex_enter(&mpt->m_mutex);
12494 12500 mptsas_read_adapter_data(mpt, &adapter_data);
12495 12501 mutex_exit(&mpt->m_mutex);
12496 12502 } else {
12497 12503 adapter_data.StructureLength = (uint32_t)
12498 12504 sizeof (mptsas_adapter_data_t);
12499 12505 copylen = sizeof (adapter_data.StructureLength);
12500 12506 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
12501 12507 }
12502 12508 if (ddi_copyout((void *)(&adapter_data), (void *)data,
12503 12509 copylen, mode) != 0) {
12504 12510 status = EFAULT;
12505 12511 }
12506 12512 break;
12507 12513 case MPTIOCTL_GET_PCI_INFO:
12508 12514 /*
12509 12515 * The user has requested to read pci info. Call
12510 12516 * our routine which does this.
12511 12517 */
12512 12518 bzero(&pci_info, sizeof (mptsas_pci_info_t));
12513 12519 mutex_enter(&mpt->m_mutex);
12514 12520 mptsas_read_pci_info(mpt, &pci_info);
12515 12521 mutex_exit(&mpt->m_mutex);
12516 12522 if (ddi_copyout((void *)(&pci_info), (void *)data,
12517 12523 sizeof (mptsas_pci_info_t), mode) != 0) {
12518 12524 status = EFAULT;
12519 12525 }
12520 12526 break;
12521 12527 case MPTIOCTL_RESET_ADAPTER:
12522 12528 mutex_enter(&mpt->m_mutex);
12523 12529 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12524 12530 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
12525 12531 mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
12526 12532 "failed");
12527 12533 status = EFAULT;
12528 12534 }
12529 12535 mutex_exit(&mpt->m_mutex);
12530 12536 break;
12531 12537 case MPTIOCTL_DIAG_ACTION:
12532 12538 /*
12533 12539 * The user has done a diag buffer action. Call our
12534 12540 * routine which does this. Only allow one diag action
12535 12541 * at one time.
12536 12542 */
12537 12543 mutex_enter(&mpt->m_mutex);
12538 12544 if (mpt->m_diag_action_in_progress) {
12539 12545 mutex_exit(&mpt->m_mutex);
12540 12546 return (EBUSY);
12541 12547 }
12542 12548 mpt->m_diag_action_in_progress = 1;
12543 12549 status = mptsas_diag_action(mpt,
12544 12550 (mptsas_diag_action_t *)data, mode);
12545 12551 mpt->m_diag_action_in_progress = 0;
12546 12552 mutex_exit(&mpt->m_mutex);
12547 12553 break;
12548 12554 case MPTIOCTL_EVENT_QUERY:
12549 12555 /*
12550 12556 * The user has done an event query. Call our routine
12551 12557 * which does this.
12552 12558 */
12553 12559 status = mptsas_event_query(mpt,
12554 12560 (mptsas_event_query_t *)data, mode, rval);
12555 12561 break;
12556 12562 case MPTIOCTL_EVENT_ENABLE:
12557 12563 /*
12558 12564 * The user has done an event enable. Call our routine
12559 12565 * which does this.
12560 12566 */
12561 12567 status = mptsas_event_enable(mpt,
12562 12568 (mptsas_event_enable_t *)data, mode, rval);
12563 12569 break;
12564 12570 case MPTIOCTL_EVENT_REPORT:
12565 12571 /*
12566 12572 * The user has done an event report. Call our routine
12567 12573 * which does this.
12568 12574 */
12569 12575 status = mptsas_event_report(mpt,
12570 12576 (mptsas_event_report_t *)data, mode, rval);
12571 12577 break;
12572 12578 case MPTIOCTL_REG_ACCESS:
12573 12579 /*
12574 12580 * The user has requested register access. Call our
12575 12581 * routine which does this.
12576 12582 */
12577 12583 status = mptsas_reg_access(mpt,
12578 12584 (mptsas_reg_access_t *)data, mode);
12579 12585 break;
12580 12586 default:
12581 12587 status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
12582 12588 rval);
12583 12589 break;
12584 12590 }
12585 12591
12586 12592 out:
12587 12593 return (status);
12588 12594 }
12589 12595
12590 12596 int
12591 12597 mptsas_restart_ioc(mptsas_t *mpt)
12592 12598 {
12593 12599 int rval = DDI_SUCCESS;
12594 12600 mptsas_target_t *ptgt = NULL;
12595 12601
12596 12602 ASSERT(mutex_owned(&mpt->m_mutex));
12597 12603
12598 12604 /*
12599 12605 * Set a flag telling I/O path that we're processing a reset. This is
12600 12606 * needed because after the reset is complete, the hash table still
12601 12607 * needs to be rebuilt. If I/Os are started before the hash table is
12602 12608 * rebuilt, I/O errors will occur. This flag allows I/Os to be marked
12603 12609 * so that they can be retried.
12604 12610 */
12605 12611 mpt->m_in_reset = TRUE;
12606 12612
12607 12613 /*
12608 12614 * Set all throttles to HOLD
12609 12615 */
12610 12616 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
12611 12617 ptgt = refhash_next(mpt->m_targets, ptgt)) {
12612 12618 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
12613 12619 }
12614 12620
12615 12621 /*
12616 12622 * Disable interrupts
12617 12623 */
12618 12624 MPTSAS_DISABLE_INTR(mpt);
12619 12625
12620 12626 /*
12621 12627 * Abort all commands: outstanding commands, commands in waitq and
12622 12628 * tx_waitq.
12623 12629 */
12624 12630 mptsas_flush_hba(mpt);
12625 12631
12626 12632 /*
12627 12633 * Reinitialize the chip.
12628 12634 */
12629 12635 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
12630 12636 rval = DDI_FAILURE;
12631 12637 }
12632 12638
12633 12639 /*
12634 12640 * Enable interrupts again
12635 12641 */
12636 12642 MPTSAS_ENABLE_INTR(mpt);
12637 12643
12638 12644 /*
12639 12645 * If mptsas_init_chip was successful, update the driver data.
12640 12646 */
12641 12647 if (rval == DDI_SUCCESS) {
12642 12648 mptsas_update_driver_data(mpt);
12643 12649 }
12644 12650
12645 12651 /*
12646 12652 * Reset the throttles
12647 12653 */
12648 12654 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
12649 12655 ptgt = refhash_next(mpt->m_targets, ptgt)) {
12650 12656 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
12651 12657 }
12652 12658
12653 12659 mptsas_doneq_empty(mpt);
12654 12660 mptsas_restart_hba(mpt);
12655 12661
12656 12662 if (rval != DDI_SUCCESS) {
12657 12663 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
12658 12664 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
12659 12665 }
12660 12666
12661 12667 /*
12662 12668 * Clear the reset flag so that I/Os can continue.
12663 12669 */
12664 12670 mpt->m_in_reset = FALSE;
12665 12671
12666 12672 return (rval);
12667 12673 }
12668 12674
/*
 * mptsas_init_chip --
 *	Bring the IOC from an unknown/reset state to fully operational:
 *	validate the flash image, reset the chip, re-fetch IOC facts,
 *	re-allocate every facts-dependent resource, initialize the reply
 *	queues, enable ports and event notification, and FMA-check all
 *	DMA/access handles.  Called from attach and again on every reset.
 *
 *	first_time - nonzero on the initial attach path; when zero, a
 *	successful message unit reset (MUR) lets us skip re-allocation
 *	and jump straight to re-initializing the IOC.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 *
 * Note: the target refhash (mpt->m_targets) is intentionally NOT
 * (re-)created here.  Replacing the hash on every reset can leave other
 * code holding references into the old hash and cause a hang (see
 * illumos 5297); the hash is created once elsewhere and persists
 * across chip resets.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t cookie;
	uint32_t i;
	int rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	/*
	 * After a MUR on a non-attach path the facts-dependent buffers are
	 * still valid, so skip the de-/re-allocation below.
	 */
	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
		goto fail;
	}
	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 * All-ones marks a reply descriptor slot as unused.
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		goto fail;
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	/* Check all DMA handles touched above before declaring success. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
12852 12852
/*
 * mptsas_get_pci_cap --
 *	Walk the PCI capabilities list of the adapter and record the
 *	capabilities we care about.  Currently only power management is
 *	acted upon (sets MPTSAS_OPT_PM and saves the PMCSR offset); other
 *	known capability IDs are silently accepted, unknown ones are logged.
 *
 * Returns TRUE on a completed walk, FALSE if no config handle is set up.
 * Note that a malformed capability list terminates the walk early but
 * still returns TRUE.
 */
static int
mptsas_get_pci_cap(mptsas_t *mpt)
{
	ushort_t caps_ptr, cap, cap_count;

	if (mpt->m_config_handle == NULL)
		return (FALSE);
	/*
	 * Check if capabilities list is supported and if so,
	 * get initial capabilities pointer and clear bits 0,1.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
	    & PCI_STAT_CAP) {
		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
		    PCI_CONF_CAP_PTR), 4);
	} else {
		caps_ptr = PCI_CAP_NEXT_PTR_NULL;
	}

	/*
	 * Walk capabilities if supported.
	 */
	for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {

		/*
		 * Check that we haven't exceeded the maximum number of
		 * capabilities and that the pointer is in a valid range.
		 * (48 is a sanity bound against looped capability lists;
		 * pointers below 64 would land in the config header.)
		 */
		if (++cap_count > 48) {
			mptsas_log(mpt, CE_WARN,
			    "too many device capabilities.\n");
			break;
		}
		if (caps_ptr < 64) {
			mptsas_log(mpt, CE_WARN,
			    "capabilities pointer 0x%x out of range.\n",
			    caps_ptr);
			break;
		}

		/*
		 * Get next capability and check that it is valid.
		 * For now, we only support power management.
		 */
		cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
		switch (cap) {
			case PCI_CAP_ID_PM:
				mptsas_log(mpt, CE_NOTE,
				    "?mptsas%d supports power management.\n",
				    mpt->m_instance);
				mpt->m_options |= MPTSAS_OPT_PM;

				/* Save PMCSR offset */
				mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
				break;
			/*
			 * The following capabilities are valid.  Any others
			 * will cause a message to be logged.
			 */
			case PCI_CAP_ID_VPD:
			case PCI_CAP_ID_MSI:
			case PCI_CAP_ID_PCIX:
			case PCI_CAP_ID_PCI_E:
			case PCI_CAP_ID_MSI_X:
				break;
			default:
				mptsas_log(mpt, CE_NOTE,
				    "?mptsas%d unrecognized capability "
				    "0x%x.\n", mpt->m_instance, cap);
				break;
		}

		/*
		 * Get next capabilities pointer and clear bits 0,1.
		 */
		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
		    (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
	}
	return (TRUE);
}
12933 12933
12934 12934 static int
12935 12935 mptsas_init_pm(mptsas_t *mpt)
12936 12936 {
12937 12937 char pmc_name[16];
12938 12938 char *pmc[] = {
12939 12939 NULL,
12940 12940 "0=Off (PCI D3 State)",
12941 12941 "3=On (PCI D0 State)",
12942 12942 NULL
12943 12943 };
12944 12944 uint16_t pmcsr_stat;
12945 12945
12946 12946 if (mptsas_get_pci_cap(mpt) == FALSE) {
12947 12947 return (DDI_FAILURE);
12948 12948 }
12949 12949 /*
12950 12950 * If PCI's capability does not support PM, then don't need
12951 12951 * to registe the pm-components
12952 12952 */
12953 12953 if (!(mpt->m_options & MPTSAS_OPT_PM))
12954 12954 return (DDI_SUCCESS);
12955 12955 /*
12956 12956 * If power management is supported by this chip, create
12957 12957 * pm-components property for the power management framework
12958 12958 */
12959 12959 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
12960 12960 pmc[0] = pmc_name;
12961 12961 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
12962 12962 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
12963 12963 mpt->m_options &= ~MPTSAS_OPT_PM;
12964 12964 mptsas_log(mpt, CE_WARN,
12965 12965 "mptsas%d: pm-component property creation failed.",
12966 12966 mpt->m_instance);
12967 12967 return (DDI_FAILURE);
12968 12968 }
12969 12969
12970 12970 /*
12971 12971 * Power on device.
12972 12972 */
12973 12973 (void) pm_busy_component(mpt->m_dip, 0);
12974 12974 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
12975 12975 mpt->m_pmcsr_offset);
12976 12976 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
12977 12977 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
12978 12978 mpt->m_instance);
12979 12979 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
12980 12980 PCI_PMCSR_D0);
12981 12981 }
12982 12982 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
12983 12983 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
12984 12984 return (DDI_FAILURE);
12985 12985 }
12986 12986 mpt->m_power_level = PM_LEVEL_D0;
12987 12987 /*
12988 12988 * Set pm idle delay.
12989 12989 */
12990 12990 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
12991 12991 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
12992 12992
12993 12993 return (DDI_SUCCESS);
12994 12994 }
12995 12995
12996 12996 static int
12997 12997 mptsas_register_intrs(mptsas_t *mpt)
12998 12998 {
12999 12999 dev_info_t *dip;
13000 13000 int intr_types;
13001 13001
13002 13002 dip = mpt->m_dip;
13003 13003
13004 13004 /* Get supported interrupt types */
13005 13005 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
13006 13006 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
13007 13007 "failed\n");
13008 13008 return (FALSE);
13009 13009 }
13010 13010
13011 13011 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
13012 13012
13013 13013 /*
13014 13014 * Try MSI, but fall back to FIXED
13015 13015 */
13016 13016 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
13017 13017 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
13018 13018 NDBG0(("Using MSI interrupt type"));
13019 13019 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
13020 13020 return (TRUE);
13021 13021 }
13022 13022 }
13023 13023 if (intr_types & DDI_INTR_TYPE_FIXED) {
13024 13024 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
13025 13025 NDBG0(("Using FIXED interrupt type"));
13026 13026 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
13027 13027 return (TRUE);
13028 13028 } else {
13029 13029 NDBG0(("FIXED interrupt registration failed"));
13030 13030 return (FALSE);
13031 13031 }
13032 13032 }
13033 13033
13034 13034 return (FALSE);
13035 13035 }
13036 13036
/*
 * mptsas_unregister_intrs --
 *	Thin wrapper that tears down all registered interrupts; see
 *	mptsas_rem_intrs() for the actual work.
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
13042 13042
/*
 * mptsas_add_intrs:
 *
 * Register FIXED or MSI interrupts.
 *
 * Allocates the interrupt handle array (mpt->m_htable), adds
 * mptsas_intr() as the handler for each vector, records the priority
 * and capability flags, and enables delivery.  On any failure all
 * handles allocated so far are freed along with the handle array.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_add_intrs(mptsas_t *mpt, int intr_type)
{
	dev_info_t *dip = mpt->m_dip;
	int avail, actual, count = 0;
	int i, flag, ret;

	NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count <= 0)) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
		    "ret %d count %d\n", ret, count);

		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
		    "ret %d avail %d\n", ret, avail);

		return (DDI_FAILURE);
	}

	if (avail < count) {
		mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
		    "navail() returned %d", count, avail);
	}

	/* Mpt only have one interrupt routine */
	if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
		count = 1;
	}

	/* Allocate an array of interrupt handles */
	mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
	mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);

	flag = DDI_INTR_ALLOC_NORMAL;

	/* call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
		    ret);
		kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/* use interrupt count returned or abort? */
	if (actual < count) {
		mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
		    count, actual);
	}

	mpt->m_intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
	    &mpt->m_intr_pri)) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(mpt->m_htable[i]);
		}

		kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/* Test for high level mutex */
	if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
		mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
		    "Hi level interrupt not supported\n");

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(mpt->m_htable[i]);
		}

		kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/*
	 * Call ddi_intr_add_handler().
	 *
	 * NOTE(review): on failure here, handlers added on earlier
	 * iterations are not removed with ddi_intr_remove_handler()
	 * before the handles are freed — confirm whether ddi_intr_free()
	 * is sufficient cleanup in that case.
	 */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
		    (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
			    "failed %d\n", ret);

			/* Free already allocated intr */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(mpt->m_htable[i]);
			}

			kmem_free(mpt->m_htable, mpt->m_intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
	    != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(mpt->m_htable[i]);
		}

		kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/*
	 * Enable interrupts
	 */
	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < mpt->m_intr_cnt; i++) {
			(void) ddi_intr_enable(mpt->m_htable[i]);
		}
	}
	return (DDI_SUCCESS);
}
13184 13184
13185 13185 /*
13186 13186 * mptsas_rem_intrs:
13187 13187 *
13188 13188 * Unregister FIXED or MSI interrupts
13189 13189 */
13190 13190 static void
13191 13191 mptsas_rem_intrs(mptsas_t *mpt)
13192 13192 {
13193 13193 int i;
13194 13194
13195 13195 NDBG6(("mptsas_rem_intrs"));
13196 13196
13197 13197 /* Disable all interrupts */
13198 13198 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13199 13199 /* Call ddi_intr_block_disable() */
13200 13200 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
13201 13201 } else {
13202 13202 for (i = 0; i < mpt->m_intr_cnt; i++) {
13203 13203 (void) ddi_intr_disable(mpt->m_htable[i]);
13204 13204 }
13205 13205 }
13206 13206
13207 13207 /* Call ddi_intr_remove_handler() */
13208 13208 for (i = 0; i < mpt->m_intr_cnt; i++) {
13209 13209 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
13210 13210 (void) ddi_intr_free(mpt->m_htable[i]);
13211 13211 }
13212 13212
13213 13213 kmem_free(mpt->m_htable, mpt->m_intr_size);
13214 13214 }
13215 13215
/*
 * The IO fault service error handling callback function.
 *
 * Registered with ddi_fm_handler_register(); posts a PCI ereport for
 * the error and returns the framework's own status unchanged, since
 * the driver can recover from errors on any of its handles.
 */
/*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
13230 13230
/*
 * mptsas_fm_init - initialize fma capabilities and register with IO
 * fault services.
 *
 * A no-op when mpt->m_fm_capabilities is zero.  Otherwise marks the
 * register-access and DMA attributes as error-aware, registers with the
 * FM framework (which may reduce m_fm_capabilities to what is actually
 * supported), and sets up PCI ereports and the error callback as
 * capabilities allow.
 */
static void
mptsas_fm_init(mptsas_t *mpt)
{
	/*
	 * Need to change iblock to priority for new MSI intr
	 */
	ddi_iblock_cookie_t	fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (mpt->m_fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 * mpt->m_fm_capabilities will be updated to indicate
		 * capabilities actually supported (not requested.)
		 */
		ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */
		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			pci_ereport_setup(mpt->m_dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			ddi_fm_handler_register(mpt->m_dip,
			    mptsas_fm_error_cb, (void *) mpt);
		}
	}
}
13275 13275
/*
 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
 * fault services.
 *
 * Mirrors mptsas_fm_init() in reverse order: unregister the error
 * callback, tear down PCI ereports, unregister from the framework, and
 * restore the default (non-FMA) access/DMA attributes.
 */
static void
mptsas_fm_fini(mptsas_t *mpt)
{
	/* Only unregister FMA capabilities if registered */
	if (mpt->m_fm_capabilities) {

		/*
		 * Un-register error callback if error callback capable.
		 */

		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			ddi_fm_handler_unregister(mpt->m_dip);
		}

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */

		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			pci_ereport_teardown(mpt->m_dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(mpt->m_dip);

		/* Adjust access and dma attributes for FMA */
		mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;

	}
}
13314 13314
13315 13315 int
13316 13316 mptsas_check_acc_handle(ddi_acc_handle_t handle)
13317 13317 {
13318 13318 ddi_fm_error_t de;
13319 13319
13320 13320 if (handle == NULL)
13321 13321 return (DDI_FAILURE);
13322 13322 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
13323 13323 return (de.fme_status);
13324 13324 }
13325 13325
13326 13326 int
13327 13327 mptsas_check_dma_handle(ddi_dma_handle_t handle)
13328 13328 {
13329 13329 ddi_fm_error_t de;
13330 13330
13331 13331 if (handle == NULL)
13332 13332 return (DDI_FAILURE);
13333 13333 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
13334 13334 return (de.fme_status);
13335 13335 }
13336 13336
13337 13337 void
13338 13338 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
13339 13339 {
13340 13340 uint64_t ena;
13341 13341 char buf[FM_MAX_CLASS];
13342 13342
13343 13343 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
13344 13344 ena = fm_ena_generate(0, FM_ENA_FMT1);
13345 13345 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
13346 13346 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
13347 13347 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
13348 13348 }
13349 13349 }
13350 13350
13351 13351 static int
13352 13352 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13353 13353 uint16_t *dev_handle, mptsas_target_t **pptgt)
13354 13354 {
13355 13355 int rval;
13356 13356 uint32_t dev_info;
13357 13357 uint64_t sas_wwn;
13358 13358 mptsas_phymask_t phymask;
13359 13359 uint8_t physport, phynum, config, disk;
13360 13360 uint64_t devicename;
13361 13361 uint16_t pdev_hdl;
13362 13362 mptsas_target_t *tmp_tgt = NULL;
13363 13363 uint16_t bay_num, enclosure, io_flags;
13364 13364
13365 13365 ASSERT(*pptgt == NULL);
13366 13366
13367 13367 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13368 13368 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13369 13369 &bay_num, &enclosure, &io_flags);
13370 13370 if (rval != DDI_SUCCESS) {
13371 13371 rval = DEV_INFO_FAIL_PAGE0;
13372 13372 return (rval);
13373 13373 }
13374 13374
13375 13375 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
13376 13376 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13377 13377 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
13378 13378 rval = DEV_INFO_WRONG_DEVICE_TYPE;
13379 13379 return (rval);
13380 13380 }
13381 13381
13382 13382 /*
13383 13383 * Check if the dev handle is for a Phys Disk. If so, set return value
13384 13384 * and exit. Don't add Phys Disks to hash.
13385 13385 */
13386 13386 for (config = 0; config < mpt->m_num_raid_configs; config++) {
13387 13387 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
13388 13388 if (*dev_handle == mpt->m_raidconfig[config].
13389 13389 m_physdisk_devhdl[disk]) {
13390 13390 rval = DEV_INFO_PHYS_DISK;
13391 13391 return (rval);
13392 13392 }
13393 13393 }
13394 13394 }
13395 13395
13396 13396 /*
13397 13397 * Get SATA Device Name from SAS device page0 for
13398 13398 * sata device, if device name doesn't exist, set mta_wwn to
13399 13399 * 0 for direct attached SATA. For the device behind the expander
13400 13400 * we still can use STP address assigned by expander.
13401 13401 */
13402 13402 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13403 13403 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13404 13404 mutex_exit(&mpt->m_mutex);
13405 13405 /* alloc a tmp_tgt to send the cmd */
13406 13406 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
13407 13407 KM_SLEEP);
13408 13408 tmp_tgt->m_devhdl = *dev_handle;
13409 13409 tmp_tgt->m_deviceinfo = dev_info;
13410 13410 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
13411 13411 tmp_tgt->m_qfull_retry_interval =
13412 13412 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
13413 13413 tmp_tgt->m_t_throttle = MAX_THROTTLE;
13414 13414 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13415 13415 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
13416 13416 mutex_enter(&mpt->m_mutex);
13417 13417 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13418 13418 sas_wwn = devicename;
13419 13419 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13420 13420 sas_wwn = 0;
13421 13421 }
13422 13422 }
13423 13423
13424 13424 phymask = mptsas_physport_to_phymask(mpt, physport);
13425 13425 *pptgt = mptsas_tgt_alloc(mpt, *dev_handle, sas_wwn,
13426 13426 dev_info, phymask, phynum);
13427 13427 if (*pptgt == NULL) {
13428 13428 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
13429 13429 "structure!");
13430 13430 rval = DEV_INFO_FAIL_ALLOC;
13431 13431 return (rval);
13432 13432 }
13433 13433 (*pptgt)->m_io_flags = io_flags;
13434 13434 (*pptgt)->m_enclosure = enclosure;
13435 13435 (*pptgt)->m_slot_num = bay_num;
13436 13436 return (DEV_INFO_SUCCESS);
13437 13437 }
13438 13438
/*
 * mptsas_get_sata_guid --
 *	Fetch a SATA device's GUID by issuing an INQUIRY for VPD page
 *	0x83 (device identification) and extracting the first (logical
 *	unit) designator when it is in NAA 0x5 format.  Retries up to
 *	three times, one second apart, while the page data is not ready.
 *
 * Returns the big-endian-decoded NAA WWN, or 0 when the device has no
 * NAA-format GUID or the inquiry fails.
 */
uint64_t
mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
{
	uint64_t	sata_guid = 0, *pwwn = NULL;
	int		target = ptgt->m_devhdl;
	uchar_t		*inq83 = NULL;
	int		inq83_len = 0xFF;
	uchar_t		*dblk = NULL;
	int		inq83_retry = 3;
	int		rval = DDI_FAILURE;

	inq83	= kmem_zalloc(inq83_len, KM_SLEEP);

inq83_retry:
	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
	    inq83_len, NULL, 1);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
		    "0x83 for target:%x, lun:%x failed!", target, lun);
		goto out;
	}
	/* According to SAT2, the first descriptor is logic unit name */
	dblk = &inq83[4];
	/* association field (bits 5:4) must be 0 = logical unit */
	if ((dblk[1] & 0x30) != 0) {
		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
		goto out;
	}
	pwwn = (uint64_t *)(void *)(&dblk[4]);
	if ((dblk[4] & 0xf0) == 0x50) {
		/* NAA 0x5 (IEEE registered) designator: this is the GUID */
		sata_guid = BE_64(*pwwn);
		goto out;
	} else if (dblk[4] == 'A') {
		NDBG20(("SATA drive has no NAA format GUID."));
		goto out;
	} else {
		/* The data is not ready, wait and retry */
		inq83_retry--;
		if (inq83_retry <= 0) {
			goto out;
		}
		NDBG20(("The GUID is not ready, retry..."));
		delay(1 * drv_usectohz(1000000));
		goto inq83_retry;
	}
out:
	kmem_free(inq83, inq83_len);
	return (sata_guid);
}
13487 13487
/*
 * mptsas_inquiry --
 *	Issue a SCSI INQUIRY to the target/lun and copy the response
 *	into the caller's buffer.
 *
 *	page	- VPD page code (used with evpd != 0)
 *	buf/len	- caller's response buffer (len <= 0xffff)
 *	reallen	- out, optional: actual bytes transferred (len - resid)
 *	evpd	- EVPD bit for the CDB (0 = standard inquiry data)
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
    unsigned char *buf, int len, int *reallen, uchar_t evpd)
{
	uchar_t			cdb[CDB_GROUP0];
	struct scsi_address	ap;
	struct buf		*data_bp = NULL;
	int			resid = 0;
	int			ret = DDI_FAILURE;

	ASSERT(len <= 0xffff);

	/* Build a synthetic scsi_address; the real target rides in ptgt */
	ap.a_target = MPTSAS_INVALID_DEVHDL;
	ap.a_lun = (uchar_t)(lun);
	ap.a_hba_tran = mpt->m_tran;

	data_bp = scsi_alloc_consistent_buf(&ap,
	    (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
	if (data_bp == NULL) {
		return (ret);
	}
	bzero(cdb, CDB_GROUP0);
	cdb[0] = SCMD_INQUIRY;
	cdb[1] = evpd;
	cdb[2] = page;
	cdb[3] = (len & 0xff00) >> 8;
	cdb[4] = (len & 0x00ff);
	cdb[5] = 0;

	ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
	    &resid);
	if (ret == DDI_SUCCESS) {
		if (reallen) {
			*reallen = len - resid;
		}
		bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
	}
	if (data_bp) {
		scsi_free_consistent_buf(data_bp);
	}
	return (ret);
}
13530 13530
/*
 * mptsas_send_scsi_cmd --
 *	Build and synchronously poll a SCSI command on behalf of the
 *	driver itself (no sd instance involved).  A clone of the HBA
 *	tran structure carries the target/lun in its tgt_private so
 *	scsi_init_pkt() routes the packet correctly.
 *
 *	resid	- out, optional: residual byte count from the packet
 *
 * Returns DDI_SUCCESS only when the command completes without a
 * check condition; DDI_FAILURE otherwise.  All temporary resources
 * (packet, tran clone, tgt_private) are released on every path.
 */
static int
mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
    mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
    int *resid)
{
	struct scsi_pkt		*pktp = NULL;
	scsi_hba_tran_t		*tran_clone = NULL;
	mptsas_tgt_private_t	*tgt_private = NULL;
	int			ret = DDI_FAILURE;

	/*
	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
	 * to simulate the cmds from sd
	 */
	tran_clone = kmem_alloc(
	    sizeof (scsi_hba_tran_t), KM_SLEEP);
	/*
	 * NOTE(review): kmem_alloc(..., KM_SLEEP) sleeps until memory is
	 * available and never returns NULL, so this check (and the one on
	 * tgt_private below) appears to be dead code — confirm before
	 * removing.
	 */
	if (tran_clone == NULL) {
		goto out;
	}
	bcopy((caddr_t)mpt->m_tran,
	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
	tgt_private = kmem_alloc(
	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
	if (tgt_private == NULL) {
		goto out;
	}
	tgt_private->t_lun = ap->a_lun;
	tgt_private->t_private = ptgt;
	tran_clone->tran_tgt_private = tgt_private;
	ap->a_hba_tran = tran_clone;

	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
	    data_bp, cdblen, sizeof (struct scsi_arq_status),
	    0, PKT_CONSISTENT, NULL, NULL);
	if (pktp == NULL) {
		goto out;
	}
	bcopy(cdb, pktp->pkt_cdbp, cdblen);
	pktp->pkt_flags = FLAG_NOPARITY;
	/* Synchronous submission: poll until the command completes */
	if (scsi_poll(pktp) < 0) {
		goto out;
	}
	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
		goto out;
	}
	if (resid != NULL) {
		*resid = pktp->pkt_resid;
	}

	ret = DDI_SUCCESS;
out:
	if (pktp) {
		scsi_destroy_pkt(pktp);
	}
	if (tran_clone) {
		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
	}
	if (tgt_private) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
	}
	return (ret);
}
13594 13594 static int
13595 13595 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
13596 13596 {
13597 13597 char *cp = NULL;
13598 13598 char *ptr = NULL;
13599 13599 size_t s = 0;
13600 13600 char *wwid_str = NULL;
13601 13601 char *lun_str = NULL;
13602 13602 long lunnum;
13603 13603 long phyid = -1;
13604 13604 int rc = DDI_FAILURE;
13605 13605
13606 13606 ptr = name;
13607 13607 ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
13608 13608 ptr++;
13609 13609 if ((cp = strchr(ptr, ',')) == NULL) {
13610 13610 return (DDI_FAILURE);
13611 13611 }
13612 13612
13613 13613 wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13614 13614 s = (uintptr_t)cp - (uintptr_t)ptr;
13615 13615
13616 13616 bcopy(ptr, wwid_str, s);
13617 13617 wwid_str[s] = '\0';
13618 13618
13619 13619 ptr = ++cp;
13620 13620
13621 13621 if ((cp = strchr(ptr, '\0')) == NULL) {
13622 13622 goto out;
13623 13623 }
13624 13624 lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13625 13625 s = (uintptr_t)cp - (uintptr_t)ptr;
13626 13626
13627 13627 bcopy(ptr, lun_str, s);
13628 13628 lun_str[s] = '\0';
13629 13629
13630 13630 if (name[0] == 'p') {
13631 13631 rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
13632 13632 } else {
13633 13633 rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
13634 13634 }
13635 13635 if (rc != DDI_SUCCESS)
13636 13636 goto out;
13637 13637
13638 13638 if (phyid != -1) {
13639 13639 ASSERT(phyid < MPTSAS_MAX_PHYS);
13640 13640 *phy = (uint8_t)phyid;
13641 13641 }
13642 13642 rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
13643 13643 if (rc != 0)
13644 13644 goto out;
13645 13645
13646 13646 *lun = (int)lunnum;
13647 13647 rc = DDI_SUCCESS;
13648 13648 out:
13649 13649 if (wwid_str)
13650 13650 kmem_free(wwid_str, SCSI_MAXNAMELEN);
13651 13651 if (lun_str)
13652 13652 kmem_free(lun_str, SCSI_MAXNAMELEN);
13653 13653
13654 13654 return (rc);
13655 13655 }
13656 13656
13657 13657 /*
13658 13658 * mptsas_parse_smp_name() is to parse sas wwn string
13659 13659 * which format is "wWWN"
13660 13660 */
13661 13661 static int
13662 13662 mptsas_parse_smp_name(char *name, uint64_t *wwn)
13663 13663 {
13664 13664 char *ptr = name;
13665 13665
13666 13666 if (*ptr != 'w') {
13667 13667 return (DDI_FAILURE);
13668 13668 }
13669 13669
13670 13670 ptr++;
13671 13671 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
13672 13672 return (DDI_FAILURE);
13673 13673 }
13674 13674 return (DDI_SUCCESS);
13675 13675 }
13676 13676
/*
 * tran_bus_config(9E) entry point for mptsas iports.  Handles
 * BUS_CONFIG_ONE (a single "smp@w...", "...@wWWN,LUN" or "...@pPHY,LUN"
 * child), and BUS_CONFIG_DRIVER/BUS_CONFIG_ALL (full enumeration via
 * mptsas_config_all()).  The scsi_vhci nexus and the iport are held across
 * the operation.  Returns NDI_SUCCESS/NDI_FAILURE (DDI_FAILURE for the
 * early sanity failures).
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int ret = NDI_FAILURE;
	int circ = 0;
	int circ1 = 0;
	mptsas_t *mpt;
	char *ptr = NULL;
	char *devnm = NULL;
	uint64_t wwid = 0;
	uint8_t phy = 0xFF;
	int lun = 0;
	uint_t mflags = flag;
	int bconfig = TRUE;

	/* Only iports (nodes with a unit address) can be bus_config'd. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				/* Append ",0" and re-find the address part. */
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				/* Hold the child in lieu of bus_config. */
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	/* Let the framework online the node(s) just configured. */
	if ((ret == NDI_SUCCESS) && bconfig) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
13794 13794
13795 13795 static int
13796 13796 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
13797 13797 mptsas_target_t *ptgt)
13798 13798 {
13799 13799 int rval = DDI_FAILURE;
13800 13800 struct scsi_inquiry *sd_inq = NULL;
13801 13801 mptsas_t *mpt = DIP2MPT(pdip);
13802 13802
13803 13803 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13804 13804
13805 13805 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
13806 13806 SUN_INQSIZE, 0, (uchar_t)0);
13807 13807
13808 13808 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13809 13809 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
13810 13810 } else {
13811 13811 rval = DDI_FAILURE;
13812 13812 }
13813 13813
13814 13814 kmem_free(sd_inq, SUN_INQSIZE);
13815 13815 return (rval);
13816 13816 }
13817 13817
/*
 * BUS_CONFIG_ONE helper for "wWWN,LUN" unit-addresses: look up the target
 * by SAS address on this iport's physical port and configure the requested
 * LUN, returning the child node through *lundip.  phymask == 0 denotes the
 * virtual iport, whose children are IR (RAID) volumes.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int rval;
	mptsas_t *mpt = DIP2MPT(pdip);
	int phymask;
	mptsas_target_t *ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		if (sasaddr != ptgt->m_addr.mta_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 *
			 * NOTE(review): ptgt was looked up by sasaddr above,
			 * so this mismatch appears unreachable — confirm
			 * against mptsas_wwid_to_ptgt().
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
13878 13878
13879 13879 static int
13880 13880 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
13881 13881 dev_info_t **lundip)
13882 13882 {
13883 13883 int rval;
13884 13884 mptsas_t *mpt = DIP2MPT(pdip);
13885 13885 mptsas_phymask_t phymask;
13886 13886 mptsas_target_t *ptgt = NULL;
13887 13887
13888 13888 /*
13889 13889 * Get the physical port associated to the iport
13890 13890 */
13891 13891 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13892 13892 "phymask", 0);
13893 13893
13894 13894 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
13895 13895 if (ptgt == NULL) {
13896 13896 /*
13897 13897 * didn't match any device by searching
13898 13898 */
13899 13899 return (DDI_FAILURE);
13900 13900 }
13901 13901
13902 13902 /*
13903 13903 * If the LUN already exists and the status is online,
13904 13904 * we just return the pointer to dev_info_t directly.
13905 13905 * For the mdi_pathinfo node, we'll handle it in
13906 13906 * mptsas_create_virt_lun().
13907 13907 */
13908 13908
13909 13909 *lundip = mptsas_find_child_phy(pdip, phy);
13910 13910 if (*lundip != NULL) {
13911 13911 return (DDI_SUCCESS);
13912 13912 }
13913 13913
13914 13914 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13915 13915
13916 13916 return (rval);
13917 13917 }
13918 13918
13919 13919 static int
13920 13920 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
13921 13921 uint8_t *lun_addr_type)
13922 13922 {
13923 13923 uint32_t lun_idx = 0;
13924 13924
13925 13925 ASSERT(lun_num != NULL);
13926 13926 ASSERT(lun_addr_type != NULL);
13927 13927
13928 13928 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13929 13929 /* determine report luns addressing type */
13930 13930 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
13931 13931 /*
13932 13932 * Vendors in the field have been found to be concatenating
13933 13933 * bus/target/lun to equal the complete lun value instead
13934 13934 * of switching to flat space addressing
13935 13935 */
13936 13936 /* 00b - peripheral device addressing method */
13937 13937 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
13938 13938 /* FALLTHRU */
13939 13939 /* 10b - logical unit addressing method */
13940 13940 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
13941 13941 /* FALLTHRU */
13942 13942 /* 01b - flat space addressing method */
13943 13943 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
13944 13944 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
13945 13945 *lun_addr_type = (buf[lun_idx] &
13946 13946 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
13947 13947 *lun_num = (buf[lun_idx] & 0x3F) << 8;
13948 13948 *lun_num |= buf[lun_idx + 1];
13949 13949 return (DDI_SUCCESS);
13950 13950 default:
13951 13951 return (DDI_FAILURE);
13952 13952 }
13953 13953 }
13954 13954
/*
 * Enumerate every LUN behind target ptgt: SATA devices are single-LUN and
 * handled up front by a child-node lookup; for everything else a
 * SCMD_REPORT_LUNS is issued (growing the buffer as needed), each reported
 * LUN is probed/refreshed, and children whose LUNs are no longer reported
 * are taken offline via mptsas_offline_missed_luns().
 */
static int
mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
{
	struct buf *repluns_bp = NULL;
	struct scsi_address ap;
	uchar_t cdb[CDB_GROUP5];
	int ret = DDI_FAILURE;
	int retry = 0;
	int lun_list_len = 0;
	uint16_t lun_num = 0;
	uint8_t lun_addr_type = 0;
	uint32_t lun_cnt = 0;
	uint32_t lun_total = 0;
	dev_info_t *cdip = NULL;
	uint16_t *saved_repluns = NULL;
	char *buffer = NULL;
	int buf_len = 128;
	mptsas_t *mpt = DIP2MPT(pdip);
	uint64_t sas_wwn = 0;
	uint8_t phy = 0xFF;
	uint32_t dev_info = 0;

	/* Snapshot target identity under the softstate lock. */
	mutex_enter(&mpt->m_mutex);
	sas_wwn = ptgt->m_addr.mta_wwn;
	phy = ptgt->m_phynum;
	dev_info = ptgt->m_deviceinfo;
	mutex_exit(&mpt->m_mutex);

	if (sas_wwn == 0) {
		/*
		 * It's a SATA without Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_phy(pdip, phy)) {
			return (DDI_SUCCESS);
		} else {
			/*
			 * need configure and create node
			 */
			return (DDI_FAILURE);
		}
	}

	/*
	 * WWN (SAS address or Device Name exist)
	 */
	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		/*
		 * SATA device with Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
			return (DDI_SUCCESS);
		} else {
			return (DDI_FAILURE);
		}
	}

	/*
	 * Issue REPORT LUNS, retrying up to 3 times on failure and growing
	 * the buffer when the response indicates it was too small.
	 * NOTE(review): the buffer-resize path does not increment retry, so
	 * a device reporting an ever-growing lun_list_len could loop here —
	 * confirm whether that is acceptable.
	 */
	do {
		ap.a_target = MPTSAS_INVALID_DEVHDL;
		ap.a_lun = 0;
		ap.a_hba_tran = mpt->m_tran;
		repluns_bp = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
		if (repluns_bp == NULL) {
			retry++;
			continue;
		}
		bzero(cdb, CDB_GROUP5);
		cdb[0] = SCMD_REPORT_LUNS;
		/* Allocation length, big-endian, in CDB bytes 6-9. */
		cdb[6] = (buf_len & 0xff000000) >> 24;
		cdb[7] = (buf_len & 0x00ff0000) >> 16;
		cdb[8] = (buf_len & 0x0000ff00) >> 8;
		cdb[9] = (buf_len & 0x000000ff);

		ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
		    repluns_bp, NULL);
		if (ret != DDI_SUCCESS) {
			scsi_free_consistent_buf(repluns_bp);
			retry++;
			continue;
		}
		/* First 4 bytes of the response are the LUN list length. */
		lun_list_len = BE_32(*(int *)((void *)(
		    repluns_bp->b_un.b_addr)));
		if (buf_len >= lun_list_len + 8) {
			ret = DDI_SUCCESS;
			break;
		}
		/* Response truncated: grow the buffer and try again. */
		scsi_free_consistent_buf(repluns_bp);
		buf_len = lun_list_len + 8;

	} while (retry < 3);

	if (ret != DDI_SUCCESS)
		return (ret);
	buffer = (char *)repluns_bp->b_un.b_addr;
	/*
	 * find out the number of luns returned by the SCSI ReportLun call
	 * and allocate buffer space
	 */
	lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
	saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
	/* KM_SLEEP cannot return NULL; check kept defensively. */
	if (saved_repluns == NULL) {
		scsi_free_consistent_buf(repluns_bp);
		return (DDI_FAILURE);
	}
	for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
		if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
		    &lun_num, &lun_addr_type) != DDI_SUCCESS) {
			continue;
		}
		saved_repluns[lun_cnt] = lun_num;
		/* Reuse an existing child if present, else probe the LUN. */
		if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
			ret = DDI_SUCCESS;
		else
			ret = mptsas_probe_lun(pdip, lun_num, &cdip,
			    ptgt);
		if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
			/* LUN is present again: clear any "gone" marker. */
			(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
			    MPTSAS_DEV_GONE);
		}
	}
	/* Offline children whose LUNs were not in this report. */
	mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
	kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
	scsi_free_consistent_buf(repluns_bp);
	return (DDI_SUCCESS);
}
14083 14083
/*
 * Configure one IR (RAID) volume child node given its VolDevHandle.  The
 * volume's target structure is located by a linear devhdl search, probed
 * with INQUIRY, and a physical LUN node is created for it (via *dip).
 */
static int
mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
{
	int rval = DDI_FAILURE;
	struct scsi_inquiry *sd_inq = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);
	mptsas_target_t *ptgt = NULL;

	/* Volumes are not hashed by address; search linearly by devhdl. */
	mutex_enter(&mpt->m_mutex);
	ptgt = refhash_linear_search(mpt->m_targets,
	    mptsas_target_eval_devhdl, &target);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
		    "not found.", target);
		return (rval);
	}

	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
	rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
	    SUN_INQSIZE, 0, (uchar_t)0);

	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
		    0);
	} else {
		rval = DDI_FAILURE;
	}

	kmem_free(sd_inq, SUN_INQSIZE);
	return (rval);
}
14116 14116
/*
 * configure all RAID volumes for virtual iport
 */
static void
mptsas_config_all_viport(dev_info_t *pdip)
{
	mptsas_t *mpt = DIP2MPT(pdip);
	int config, vol;
	int target;
	dev_info_t *lundip = NULL;

	/*
	 * Get latest RAID info and search for any Volume DevHandles. If any
	 * are found, configure the volume.
	 */
	mutex_enter(&mpt->m_mutex);
	for (config = 0; config < mpt->m_num_raid_configs; config++) {
		for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
			if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
			    == 1) {
				target = mpt->m_raidconfig[config].
				    m_raidvol[vol].m_raidhandle;
				/*
				 * NOTE(review): m_mutex is dropped for the
				 * config call; m_raidconfig could change
				 * while unlocked and the config/vol indices
				 * are reused without revalidation — confirm
				 * this cannot race a RAID config update.
				 */
				mutex_exit(&mpt->m_mutex);
				(void) mptsas_config_raid(pdip, target,
				    &lundip);
				mutex_enter(&mpt->m_mutex);
			}
		}
	}
	mutex_exit(&mpt->m_mutex);
}
14148 14148
/*
 * Offline every child node and mdi_pathinfo path that belongs to target
 * ptgt (matched by WWN) but whose LUN does not appear in the repluns[]
 * array of lun_cnt LUNs just returned by REPORT LUNS — i.e. LUNs that
 * have disappeared from the target.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t *child = NULL, *savechild = NULL;
	mdi_pathinfo_t *pip = NULL, *savepip = NULL;
	uint64_t sas_wwn, wwid;
	uint8_t phy;
	int lun;
	int i;
	int find;
	char *addr;
	char *nodename;
	mptsas_t *mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_addr.mta_wwn;
	mutex_exit(&mpt->m_mutex);

	/*
	 * Pass 1: plain child devinfo nodes.  The sibling pointer is taken
	 * before any potential offline so the walk survives node removal.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		savechild = child;
		child = ddi_get_next_sibling(child);

		nodename = ddi_node_name(savechild);
		/* SMP nodes are not LUNs; skip them. */
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		if (wwid == sas_wwn) {
			/* Same target: is this child's LUN still reported? */
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/*
	 * Pass 2: mdi_pathinfo (multipath) client paths, same logic.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
14245 14245
/*
 * Repopulate the target and SMP hash tables by walking the IOC's device
 * and expander pages with the GetNextHandle form, starting from the last
 * traversed handles (m_smp_devhdl / m_dev_handle).  The m_done_traverse_*
 * flags are set elsewhere when the walk reaches the last handle.  Also
 * refreshes the cached RAID configuration first.
 */
void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t page_address;
	int rval = 0;
	uint16_t dev_handle;
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	/* Walk expander (SMP) pages until the traversal is complete. */
	dev_handle = mpt->m_smp_devhdl;
	for (; mpt->m_done_traverse_smp == 0; ) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(mpt, &smp_node);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Do loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	for (; mpt->m_done_traverse_dev == 0; ) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		/* Page read or allocation failure ends the walk early. */
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}

		mpt->m_dev_handle = dev_handle;
	}

}
14299 14299
14300 14300 void
14301 14301 mptsas_update_driver_data(struct mptsas *mpt)
14302 14302 {
14303 14303 mptsas_target_t *tp;
14304 14304 mptsas_smp_t *sp;
14305 14305
↓ open down ↓ |
1574 lines elided |
↑ open up ↑ |
14306 14306 ASSERT(MUTEX_HELD(&mpt->m_mutex));
14307 14307
14308 14308 /*
14309 14309 * TODO after hard reset, update the driver data structures
14310 14310 * 1. update port/phymask mapping table mpt->m_phy_info
14311 14311 * 2. invalid all the entries in hash table
14312 14312 * m_devhdl = 0xffff and m_deviceinfo = 0
14313 14313 * 3. call sas_device_page/expander_page to update hash table
14314 14314 */
14315 14315 mptsas_update_phymask(mpt);
14316 +
14316 14317 /*
14317 - * Invalid the existing entries
14318 - *
14319 - * XXX - It seems like we should just delete everything here. We are
14320 - * holding the lock and are about to refresh all the targets in both
14321 - * hashes anyway. Given the path we're in, what outstanding async
14322 - * event could possibly be trying to reference one of these things
14323 - * without taking the lock, and how would that be useful anyway?
14318 + * Remove all the devhdls for existing entries but leave their
14319 + * addresses alone. In update_hashtab() below, we'll find all
14320 + * targets that are still present and reassociate them with
14321 + * their potentially new devhdls. Leaving the targets around in
14322 + * this fashion allows them to be used on the tx waitq even
14323 + * while IOC reset it occurring.
14324 14324 */
14325 14325 for (tp = refhash_first(mpt->m_targets); tp != NULL;
14326 14326 tp = refhash_next(mpt->m_targets, tp)) {
14327 14327 tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
14328 14328 tp->m_deviceinfo = 0;
14329 14329 tp->m_dr_flag = MPTSAS_DR_INACTIVE;
14330 14330 }
14331 14331 for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
14332 14332 sp = refhash_next(mpt->m_smp_targets, sp)) {
14333 14333 sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
14334 14334 sp->m_deviceinfo = 0;
14335 14335 }
14336 14336 mpt->m_done_traverse_dev = 0;
14337 14337 mpt->m_done_traverse_smp = 0;
14338 14338 mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
14339 14339 mptsas_update_hashtab(mpt);
14340 14340 }
14341 14341
/*
 * BUS_CONFIG_ALL / BUS_CONFIG_DRIVER handler: enumerate every SMP node and
 * target whose phymask matches this iport.  The virtual iport (phymask 0)
 * carries only RAID volumes and is delegated to mptsas_config_all_viport().
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t *smpdip = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);
	int phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t *psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* Make sure both hash tables reflect the current topology. */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/* Online every SMP node on this physical port. */
	for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
	    psmp = refhash_next(mpt->m_smp_targets, psmp)) {
		phy_mask = psmp->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			/* Drop the lock for the (blocking) NDI work. */
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
	}

	/* Configure every target on this physical port. */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		phy_mask = ptgt->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}
	}
	mutex_exit(&mpt->m_mutex);
}
14394 14394
14395 14395 static int
14396 14396 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
14397 14397 {
14398 14398 int rval = DDI_FAILURE;
14399 14399 dev_info_t *tdip;
14400 14400
14401 14401 rval = mptsas_config_luns(pdip, ptgt);
14402 14402 if (rval != DDI_SUCCESS) {
14403 14403 /*
14404 14404 * The return value means the SCMD_REPORT_LUNS
14405 14405 * did not execute successfully. The target maybe
14406 14406 * doesn't support such command.
14407 14407 */
14408 14408 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
14409 14409 }
14410 14410 return (rval);
14411 14411 }
14412 14412
/*
 * Return fail if not all the childs/paths are freed.
 * if there is any path under the HBA, the return value will be always fail
 * because we didn't call mdi_pi_free for path
 *
 * Offline every child node and mdi path whose unit-address begins with
 * `name` (the "wWWN"/"pPHY" prefix, compared up to the ',' before the
 * LUN).  Children that cannot be offlined are tagged MPTSAS_DEV_GONE.
 */
static int
mptsas_offline_target(dev_info_t *pdip, char *name)
{
	dev_info_t *child = NULL, *prechild = NULL;
	mdi_pathinfo_t *pip = NULL, *savepip = NULL;
	int tmp_rval, rval = DDI_SUCCESS;
	char *addr, *cp;
	size_t s;
	mptsas_t *mpt = DIP2MPT(pdip);

	/*
	 * Pass 1: plain child devinfo nodes.  The sibling pointer is taken
	 * before the offline so the walk survives node removal.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		addr = ddi_get_name_addr(child);
		prechild = child;
		child = ddi_get_next_sibling(child);

		if (addr == NULL) {
			continue;
		}
		if ((cp = strchr(addr, ',')) == NULL) {
			continue;
		}

		/* Compare only the target part, up to the LUN separator. */
		s = (uintptr_t)cp - (uintptr_t)addr;

		if (strncmp(addr, name, s) != 0) {
			continue;
		}

		tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
		    NDI_DEVI_REMOVE);
		if (tmp_rval != DDI_SUCCESS) {
			/* Could not offline now; mark the node as gone. */
			rval = DDI_FAILURE;
			if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
			    prechild, MPTSAS_DEV_GONE) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas driver "
				    "unable to create property for "
				    "SAS %s (MPTSAS_DEV_GONE)", addr);
			}
		}
	}

	/*
	 * Pass 2: mdi_pathinfo (multipath) client paths.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		addr = MDI_PI(pip)->pi_addr;
		savepip = pip;
		pip = mdi_get_next_client_path(pdip, pip);
		if (addr == NULL) {
			continue;
		}

		if ((cp = strchr(addr, ',')) == NULL) {
			continue;
		}

		s = (uintptr_t)cp - (uintptr_t)addr;

		if (strncmp(addr, name, s) != 0) {
			continue;
		}

		(void) mptsas_offline_lun(pdip, NULL, savepip,
		    NDI_DEVI_REMOVE);
		/*
		 * driver will not invoke mdi_pi_free, so path will not
		 * be freed forever, return DDI_FAILURE.
		 */
		rval = DDI_FAILURE;
	}
	return (rval);
}
14490 14490
/*
 * Offline a single LUN, given either a plain child devinfo (rdip) or an
 * mdi_pathinfo path (rpip) — exactly one must be non-NULL; rpip takes
 * precedence.  Cleans the devfs cache first so stale /devices entries do
 * not keep the node busy.  Returns the NDI/MDI offline result.
 */
static int
mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
    mdi_pathinfo_t *rpip, uint_t flags)
{
	int rval = DDI_FAILURE;
	char *devname;
	dev_info_t *cdip, *parent;

	if (rpip != NULL) {
		/* Multipath: the client node lives under scsi_vhci. */
		parent = scsi_vhci_dip;
		cdip = mdi_pi_get_client(rpip);
	} else if (rdip != NULL) {
		parent = pdip;
		cdip = rdip;
	} else {
		return (DDI_FAILURE);
	}

	/*
	 * Make sure node is attached otherwise
	 * it won't have related cache nodes to
	 * clean up. i_ddi_devi_attached is
	 * similiar to i_ddi_node_state(cdip) >=
	 * DS_ATTACHED.
	 */
	if (i_ddi_devi_attached(cdip)) {

		/* Get full devname */
		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
		(void) ddi_deviname(cdip, devname);
		/* Clean cache */
		(void) devfs_clean(parent, devname + 1,
		    DV_CLEAN_FORCE);
		kmem_free(devname, MAXNAMELEN + 1);
	}
	if (rpip != NULL) {
		/* Already-offline paths are treated as success. */
		if (MDI_PI_IS_OFFLINE(rpip)) {
			rval = DDI_SUCCESS;
		} else {
			rval = mdi_pi_offline(rpip, 0);
		}
	} else {
		rval = ndi_devi_offline(cdip, flags);
	}

	return (rval);
}
14538 14538
14539 14539 static dev_info_t *
14540 14540 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
14541 14541 {
14542 14542 dev_info_t *child = NULL;
14543 14543 char *smp_wwn = NULL;
14544 14544
14545 14545 child = ddi_get_child(parent);
14546 14546 while (child) {
14547 14547 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
14548 14548 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
14549 14549 != DDI_SUCCESS) {
14550 14550 child = ddi_get_next_sibling(child);
14551 14551 continue;
14552 14552 }
14553 14553
14554 14554 if (strcmp(smp_wwn, str_wwn) == 0) {
14555 14555 ddi_prop_free(smp_wwn);
14556 14556 break;
14557 14557 }
14558 14558 child = ddi_get_next_sibling(child);
14559 14559 ddi_prop_free(smp_wwn);
14560 14560 }
14561 14561 return (child);
14562 14562 }
14563 14563
14564 14564 static int
14565 14565 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
14566 14566 {
14567 14567 int rval = DDI_FAILURE;
14568 14568 char *devname;
14569 14569 char wwn_str[MPTSAS_WWN_STRLEN];
14570 14570 dev_info_t *cdip;
14571 14571
14572 14572 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
14573 14573
14574 14574 cdip = mptsas_find_smp_child(pdip, wwn_str);
14575 14575
14576 14576 if (cdip == NULL)
14577 14577 return (DDI_SUCCESS);
14578 14578
14579 14579 /*
14580 14580 * Make sure node is attached otherwise
14581 14581 * it won't have related cache nodes to
14582 14582 * clean up. i_ddi_devi_attached is
14583 14583 * similiar to i_ddi_node_state(cdip) >=
14584 14584 * DS_ATTACHED.
14585 14585 */
14586 14586 if (i_ddi_devi_attached(cdip)) {
14587 14587
14588 14588 /* Get full devname */
14589 14589 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14590 14590 (void) ddi_deviname(cdip, devname);
14591 14591 /* Clean cache */
14592 14592 (void) devfs_clean(pdip, devname + 1,
14593 14593 DV_CLEAN_FORCE);
14594 14594 kmem_free(devname, MAXNAMELEN + 1);
14595 14595 }
14596 14596
14597 14597 rval = ndi_devi_offline(cdip, flags);
14598 14598
14599 14599 return (rval);
14600 14600 }
14601 14601
14602 14602 static dev_info_t *
14603 14603 mptsas_find_child(dev_info_t *pdip, char *name)
14604 14604 {
14605 14605 dev_info_t *child = NULL;
14606 14606 char *rname = NULL;
14607 14607 int rval = DDI_FAILURE;
14608 14608
14609 14609 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14610 14610
14611 14611 child = ddi_get_child(pdip);
14612 14612 while (child) {
14613 14613 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
14614 14614 if (rval != DDI_SUCCESS) {
14615 14615 child = ddi_get_next_sibling(child);
14616 14616 bzero(rname, SCSI_MAXNAMELEN);
14617 14617 continue;
14618 14618 }
14619 14619
14620 14620 if (strcmp(rname, name) == 0) {
14621 14621 break;
14622 14622 }
14623 14623 child = ddi_get_next_sibling(child);
14624 14624 bzero(rname, SCSI_MAXNAMELEN);
14625 14625 }
14626 14626
14627 14627 kmem_free(rname, SCSI_MAXNAMELEN);
14628 14628
14629 14629 return (child);
14630 14630 }
14631 14631
14632 14632
14633 14633 static dev_info_t *
14634 14634 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
14635 14635 {
14636 14636 dev_info_t *child = NULL;
14637 14637 char *name = NULL;
14638 14638 char *addr = NULL;
14639 14639
14640 14640 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14641 14641 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14642 14642 (void) sprintf(name, "%016"PRIx64, sasaddr);
14643 14643 (void) sprintf(addr, "w%s,%x", name, lun);
14644 14644 child = mptsas_find_child(pdip, addr);
14645 14645 kmem_free(name, SCSI_MAXNAMELEN);
14646 14646 kmem_free(addr, SCSI_MAXNAMELEN);
14647 14647 return (child);
14648 14648 }
14649 14649
14650 14650 static dev_info_t *
14651 14651 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
14652 14652 {
14653 14653 dev_info_t *child;
14654 14654 char *addr;
14655 14655
14656 14656 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14657 14657 (void) sprintf(addr, "p%x,0", phy);
14658 14658 child = mptsas_find_child(pdip, addr);
14659 14659 kmem_free(addr, SCSI_MAXNAMELEN);
14660 14660 return (child);
14661 14661 }
14662 14662
14663 14663 static mdi_pathinfo_t *
14664 14664 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
14665 14665 {
14666 14666 mdi_pathinfo_t *path;
14667 14667 char *addr = NULL;
14668 14668
14669 14669 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14670 14670 (void) sprintf(addr, "p%x,0", phy);
14671 14671 path = mdi_pi_find(pdip, NULL, addr);
14672 14672 kmem_free(addr, SCSI_MAXNAMELEN);
14673 14673 return (path);
14674 14674 }
14675 14675
14676 14676 static mdi_pathinfo_t *
14677 14677 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
14678 14678 {
14679 14679 mdi_pathinfo_t *path;
14680 14680 char *name = NULL;
14681 14681 char *addr = NULL;
14682 14682
14683 14683 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14684 14684 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14685 14685 (void) sprintf(name, "%016"PRIx64, sasaddr);
14686 14686 (void) sprintf(addr, "w%s,%x", name, lun);
14687 14687 path = mdi_pi_find(parent, NULL, addr);
14688 14688 kmem_free(name, SCSI_MAXNAMELEN);
14689 14689 kmem_free(addr, SCSI_MAXNAMELEN);
14690 14690
14691 14691 return (path);
14692 14692 }
14693 14693
/*
 * Enumerate one LUN of target ptgt under pdip.  Attempts to obtain a
 * devid GUID from INQUIRY VPD page 0x83 so the LUN can be created as a
 * scsi_vhci (multipath) client; falls back to a physical (non-MPxIO)
 * node when no usable GUID is available or the virtual create fails.
 * On success *lun_dip is set to the created node.  Returns DDI_SUCCESS
 * or DDI_FAILURE.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int			i = 0;
	uchar_t			*inq83 = NULL;
	int			inq83_len1 = 0xFF;
	int			inq83_len = 0;
	int			rval = DDI_FAILURE;
	ddi_devid_t		devid;
	char			*guid = NULL;
	int			target = ptgt->m_devhdl;
	mdi_pathinfo_t		*pip = NULL;
	mptsas_t		*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83	= kmem_zalloc(inq83_len1, KM_SLEEP);

	/* Retry loop: one INQUIRY page 0x83 attempt per second. */
	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable escape hatch: optionally fall back to a
			 * physical node when the page-0x83 request itself
			 * fails, instead of failing the enumeration.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	/* Assume failure until one of the create paths below succeeds. */
	rval = DDI_FAILURE;

create_lun:
	/* Prefer a scsi_vhci client node when we have a GUID and MPxIO. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	/* Fall back to a physical child node under the iport. */
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
14817 14817
14818 14818 static int
14819 14819 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
14820 14820 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
14821 14821 {
14822 14822 int target;
14823 14823 char *nodename = NULL;
14824 14824 char **compatible = NULL;
14825 14825 int ncompatible = 0;
14826 14826 int mdi_rtn = MDI_FAILURE;
14827 14827 int rval = DDI_FAILURE;
14828 14828 char *old_guid = NULL;
14829 14829 mptsas_t *mpt = DIP2MPT(pdip);
14830 14830 char *lun_addr = NULL;
14831 14831 char *wwn_str = NULL;
14832 14832 char *attached_wwn_str = NULL;
14833 14833 char *component = NULL;
14834 14834 uint8_t phy = 0xFF;
14835 14835 uint64_t sas_wwn;
14836 14836 int64_t lun64 = 0;
14837 14837 uint32_t devinfo;
14838 14838 uint16_t dev_hdl;
14839 14839 uint16_t pdev_hdl;
14840 14840 uint64_t dev_sas_wwn;
14841 14841 uint64_t pdev_sas_wwn;
14842 14842 uint32_t pdev_info;
14843 14843 uint8_t physport;
14844 14844 uint8_t phy_id;
14845 14845 uint32_t page_address;
14846 14846 uint16_t bay_num, enclosure, io_flags;
14847 14847 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14848 14848 uint32_t dev_info;
14849 14849
14850 14850 mutex_enter(&mpt->m_mutex);
14851 14851 target = ptgt->m_devhdl;
14852 14852 sas_wwn = ptgt->m_addr.mta_wwn;
14853 14853 devinfo = ptgt->m_deviceinfo;
14854 14854 phy = ptgt->m_phynum;
14855 14855 mutex_exit(&mpt->m_mutex);
14856 14856
14857 14857 if (sas_wwn) {
14858 14858 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
14859 14859 } else {
14860 14860 *pip = mptsas_find_path_phy(pdip, phy);
14861 14861 }
14862 14862
14863 14863 if (*pip != NULL) {
14864 14864 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14865 14865 ASSERT(*lun_dip != NULL);
14866 14866 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
14867 14867 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
14868 14868 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
14869 14869 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
14870 14870 /*
14871 14871 * Same path back online again.
14872 14872 */
14873 14873 (void) ddi_prop_free(old_guid);
14874 14874 if ((!MDI_PI_IS_ONLINE(*pip)) &&
14875 14875 (!MDI_PI_IS_STANDBY(*pip)) &&
14876 14876 (ptgt->m_tgt_unconfigured == 0)) {
14877 14877 rval = mdi_pi_online(*pip, 0);
14878 14878 mutex_enter(&mpt->m_mutex);
14879 14879 ptgt->m_led_status = 0;
14880 14880 (void) mptsas_flush_led_status(mpt,
14881 14881 ptgt);
14882 14882 mutex_exit(&mpt->m_mutex);
14883 14883 } else {
14884 14884 rval = DDI_SUCCESS;
14885 14885 }
14886 14886 if (rval != DDI_SUCCESS) {
14887 14887 mptsas_log(mpt, CE_WARN, "path:target: "
14888 14888 "%x, lun:%x online failed!", target,
14889 14889 lun);
14890 14890 *pip = NULL;
14891 14891 *lun_dip = NULL;
14892 14892 }
14893 14893 return (rval);
14894 14894 } else {
14895 14895 /*
14896 14896 * The GUID of the LUN has changed which maybe
14897 14897 * because customer mapped another volume to the
14898 14898 * same LUN.
14899 14899 */
14900 14900 mptsas_log(mpt, CE_WARN, "The GUID of the "
14901 14901 "target:%x, lun:%x was changed, maybe "
14902 14902 "because someone mapped another volume "
14903 14903 "to the same LUN", target, lun);
14904 14904 (void) ddi_prop_free(old_guid);
14905 14905 if (!MDI_PI_IS_OFFLINE(*pip)) {
14906 14906 rval = mdi_pi_offline(*pip, 0);
14907 14907 if (rval != MDI_SUCCESS) {
14908 14908 mptsas_log(mpt, CE_WARN, "path:"
14909 14909 "target:%x, lun:%x offline "
14910 14910 "failed!", target, lun);
14911 14911 *pip = NULL;
14912 14912 *lun_dip = NULL;
14913 14913 return (DDI_FAILURE);
14914 14914 }
14915 14915 }
14916 14916 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
14917 14917 mptsas_log(mpt, CE_WARN, "path:target:"
14918 14918 "%x, lun:%x free failed!", target,
14919 14919 lun);
14920 14920 *pip = NULL;
14921 14921 *lun_dip = NULL;
14922 14922 return (DDI_FAILURE);
14923 14923 }
14924 14924 }
14925 14925 } else {
14926 14926 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
14927 14927 "property for path:target:%x, lun:%x", target, lun);
14928 14928 *pip = NULL;
14929 14929 *lun_dip = NULL;
14930 14930 return (DDI_FAILURE);
14931 14931 }
14932 14932 }
14933 14933 scsi_hba_nodename_compatible_get(inq, NULL,
14934 14934 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
14935 14935
14936 14936 /*
14937 14937 * if nodename can't be determined then print a message and skip it
14938 14938 */
14939 14939 if (nodename == NULL) {
14940 14940 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
14941 14941 "driver for target%d lun %d dtype:0x%02x", target, lun,
14942 14942 inq->inq_dtype);
14943 14943 return (DDI_FAILURE);
14944 14944 }
14945 14945
14946 14946 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14947 14947 /* The property is needed by MPAPI */
14948 14948 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14949 14949
14950 14950 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14951 14951 if (guid) {
14952 14952 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
14953 14953 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14954 14954 } else {
14955 14955 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
14956 14956 (void) sprintf(wwn_str, "p%x", phy);
14957 14957 }
14958 14958
14959 14959 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
14960 14960 guid, lun_addr, compatible, ncompatible,
14961 14961 0, pip);
14962 14962 if (mdi_rtn == MDI_SUCCESS) {
14963 14963
14964 14964 if (mdi_prop_update_string(*pip, MDI_GUID,
14965 14965 guid) != DDI_SUCCESS) {
14966 14966 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14967 14967 "create prop for target %d lun %d (MDI_GUID)",
14968 14968 target, lun);
14969 14969 mdi_rtn = MDI_FAILURE;
14970 14970 goto virt_create_done;
14971 14971 }
14972 14972
14973 14973 if (mdi_prop_update_int(*pip, LUN_PROP,
14974 14974 lun) != DDI_SUCCESS) {
14975 14975 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14976 14976 "create prop for target %d lun %d (LUN_PROP)",
14977 14977 target, lun);
14978 14978 mdi_rtn = MDI_FAILURE;
14979 14979 goto virt_create_done;
14980 14980 }
14981 14981 lun64 = (int64_t)lun;
14982 14982 if (mdi_prop_update_int64(*pip, LUN64_PROP,
14983 14983 lun64) != DDI_SUCCESS) {
14984 14984 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14985 14985 "create prop for target %d (LUN64_PROP)",
14986 14986 target);
14987 14987 mdi_rtn = MDI_FAILURE;
14988 14988 goto virt_create_done;
14989 14989 }
14990 14990 if (mdi_prop_update_string_array(*pip, "compatible",
14991 14991 compatible, ncompatible) !=
14992 14992 DDI_PROP_SUCCESS) {
14993 14993 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14994 14994 "create prop for target %d lun %d (COMPATIBLE)",
14995 14995 target, lun);
14996 14996 mdi_rtn = MDI_FAILURE;
14997 14997 goto virt_create_done;
14998 14998 }
14999 14999 if (sas_wwn && (mdi_prop_update_string(*pip,
15000 15000 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
15001 15001 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15002 15002 "create prop for target %d lun %d "
15003 15003 "(target-port)", target, lun);
15004 15004 mdi_rtn = MDI_FAILURE;
15005 15005 goto virt_create_done;
15006 15006 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
15007 15007 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
15008 15008 /*
15009 15009 * Direct attached SATA device without DeviceName
15010 15010 */
15011 15011 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15012 15012 "create prop for SAS target %d lun %d "
15013 15013 "(sata-phy)", target, lun);
15014 15014 mdi_rtn = MDI_FAILURE;
15015 15015 goto virt_create_done;
15016 15016 }
15017 15017 mutex_enter(&mpt->m_mutex);
15018 15018
15019 15019 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15020 15020 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15021 15021 (uint32_t)ptgt->m_devhdl;
15022 15022 rval = mptsas_get_sas_device_page0(mpt, page_address,
15023 15023 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
15024 15024 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15025 15025 if (rval != DDI_SUCCESS) {
15026 15026 mutex_exit(&mpt->m_mutex);
15027 15027 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15028 15028 "parent device for handle %d", page_address);
15029 15029 mdi_rtn = MDI_FAILURE;
15030 15030 goto virt_create_done;
15031 15031 }
15032 15032
15033 15033 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15034 15034 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15035 15035 rval = mptsas_get_sas_device_page0(mpt, page_address,
15036 15036 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15037 15037 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15038 15038 if (rval != DDI_SUCCESS) {
15039 15039 mutex_exit(&mpt->m_mutex);
15040 15040 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15041 15041 "device info for handle %d", page_address);
15042 15042 mdi_rtn = MDI_FAILURE;
15043 15043 goto virt_create_done;
15044 15044 }
15045 15045
15046 15046 mutex_exit(&mpt->m_mutex);
15047 15047
15048 15048 /*
15049 15049 * If this device direct attached to the controller
15050 15050 * set the attached-port to the base wwid
15051 15051 */
15052 15052 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15053 15053 != DEVINFO_DIRECT_ATTACHED) {
15054 15054 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15055 15055 pdev_sas_wwn);
15056 15056 } else {
15057 15057 /*
15058 15058 * Update the iport's attached-port to guid
15059 15059 */
15060 15060 if (sas_wwn == 0) {
15061 15061 (void) sprintf(wwn_str, "p%x", phy);
15062 15062 } else {
15063 15063 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15064 15064 }
15065 15065 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15066 15066 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15067 15067 DDI_PROP_SUCCESS) {
15068 15068 mptsas_log(mpt, CE_WARN,
15069 15069 "mptsas unable to create "
15070 15070 "property for iport target-port"
15071 15071 " %s (sas_wwn)",
15072 15072 wwn_str);
15073 15073 mdi_rtn = MDI_FAILURE;
15074 15074 goto virt_create_done;
15075 15075 }
15076 15076
15077 15077 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15078 15078 mpt->un.m_base_wwid);
15079 15079 }
15080 15080
15081 15081 if (mdi_prop_update_string(*pip,
15082 15082 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15083 15083 DDI_PROP_SUCCESS) {
15084 15084 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15085 15085 "property for iport attached-port %s (sas_wwn)",
15086 15086 attached_wwn_str);
15087 15087 mdi_rtn = MDI_FAILURE;
15088 15088 goto virt_create_done;
15089 15089 }
15090 15090
15091 15091
15092 15092 if (inq->inq_dtype == 0) {
15093 15093 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15094 15094 /*
15095 15095 * set obp path for pathinfo
15096 15096 */
15097 15097 (void) snprintf(component, MAXPATHLEN,
15098 15098 "disk@%s", lun_addr);
15099 15099
15100 15100 if (mdi_pi_pathname_obp_set(*pip, component) !=
15101 15101 DDI_SUCCESS) {
15102 15102 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15103 15103 "unable to set obp-path for object %s",
15104 15104 component);
15105 15105 mdi_rtn = MDI_FAILURE;
15106 15106 goto virt_create_done;
15107 15107 }
15108 15108 }
15109 15109
15110 15110 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15111 15111 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15112 15112 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15113 15113 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
15114 15114 "pm-capable", 1)) !=
15115 15115 DDI_PROP_SUCCESS) {
15116 15116 mptsas_log(mpt, CE_WARN, "mptsas driver"
15117 15117 "failed to create pm-capable "
15118 15118 "property, target %d", target);
15119 15119 mdi_rtn = MDI_FAILURE;
15120 15120 goto virt_create_done;
15121 15121 }
15122 15122 }
15123 15123 /*
15124 15124 * Create the phy-num property
15125 15125 */
15126 15126 if (mdi_prop_update_int(*pip, "phy-num",
15127 15127 ptgt->m_phynum) != DDI_SUCCESS) {
15128 15128 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15129 15129 "create phy-num property for target %d lun %d",
15130 15130 target, lun);
15131 15131 mdi_rtn = MDI_FAILURE;
15132 15132 goto virt_create_done;
15133 15133 }
15134 15134 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
15135 15135 mdi_rtn = mdi_pi_online(*pip, 0);
15136 15136 if (mdi_rtn == MDI_SUCCESS) {
15137 15137 mutex_enter(&mpt->m_mutex);
15138 15138 ptgt->m_led_status = 0;
15139 15139 (void) mptsas_flush_led_status(mpt, ptgt);
15140 15140 mutex_exit(&mpt->m_mutex);
15141 15141 }
15142 15142 if (mdi_rtn == MDI_NOT_SUPPORTED) {
15143 15143 mdi_rtn = MDI_FAILURE;
15144 15144 }
15145 15145 virt_create_done:
15146 15146 if (*pip && mdi_rtn != MDI_SUCCESS) {
15147 15147 (void) mdi_pi_free(*pip, 0);
15148 15148 *pip = NULL;
15149 15149 *lun_dip = NULL;
15150 15150 }
15151 15151 }
15152 15152
15153 15153 scsi_hba_nodename_compatible_free(nodename, compatible);
15154 15154 if (lun_addr != NULL) {
15155 15155 kmem_free(lun_addr, SCSI_MAXNAMELEN);
15156 15156 }
15157 15157 if (wwn_str != NULL) {
15158 15158 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15159 15159 }
15160 15160 if (component != NULL) {
15161 15161 kmem_free(component, MAXPATHLEN);
15162 15162 }
15163 15163
15164 15164 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15165 15165 }
15166 15166
15167 15167 static int
15168 15168 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
15169 15169 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
15170 15170 {
15171 15171 int target;
15172 15172 int rval;
15173 15173 int ndi_rtn = NDI_FAILURE;
15174 15174 uint64_t be_sas_wwn;
15175 15175 char *nodename = NULL;
15176 15176 char **compatible = NULL;
15177 15177 int ncompatible = 0;
15178 15178 int instance = 0;
15179 15179 mptsas_t *mpt = DIP2MPT(pdip);
15180 15180 char *wwn_str = NULL;
15181 15181 char *component = NULL;
15182 15182 char *attached_wwn_str = NULL;
15183 15183 uint8_t phy = 0xFF;
15184 15184 uint64_t sas_wwn;
15185 15185 uint32_t devinfo;
15186 15186 uint16_t dev_hdl;
15187 15187 uint16_t pdev_hdl;
15188 15188 uint64_t pdev_sas_wwn;
15189 15189 uint64_t dev_sas_wwn;
15190 15190 uint32_t pdev_info;
15191 15191 uint8_t physport;
15192 15192 uint8_t phy_id;
15193 15193 uint32_t page_address;
15194 15194 uint16_t bay_num, enclosure, io_flags;
15195 15195 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
15196 15196 uint32_t dev_info;
15197 15197 int64_t lun64 = 0;
15198 15198
15199 15199 mutex_enter(&mpt->m_mutex);
15200 15200 target = ptgt->m_devhdl;
15201 15201 sas_wwn = ptgt->m_addr.mta_wwn;
15202 15202 devinfo = ptgt->m_deviceinfo;
15203 15203 phy = ptgt->m_phynum;
15204 15204 mutex_exit(&mpt->m_mutex);
15205 15205
15206 15206 /*
15207 15207 * generate compatible property with binding-set "mpt"
15208 15208 */
15209 15209 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
15210 15210 &nodename, &compatible, &ncompatible);
15211 15211
15212 15212 /*
15213 15213 * if nodename can't be determined then print a message and skip it
15214 15214 */
15215 15215 if (nodename == NULL) {
15216 15216 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
15217 15217 "for target %d lun %d", target, lun);
15218 15218 return (DDI_FAILURE);
15219 15219 }
15220 15220
15221 15221 ndi_rtn = ndi_devi_alloc(pdip, nodename,
15222 15222 DEVI_SID_NODEID, lun_dip);
15223 15223
15224 15224 /*
15225 15225 * if lun alloc success, set props
15226 15226 */
15227 15227 if (ndi_rtn == NDI_SUCCESS) {
15228 15228
15229 15229 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15230 15230 *lun_dip, LUN_PROP, lun) !=
15231 15231 DDI_PROP_SUCCESS) {
15232 15232 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15233 15233 "property for target %d lun %d (LUN_PROP)",
15234 15234 target, lun);
15235 15235 ndi_rtn = NDI_FAILURE;
15236 15236 goto phys_create_done;
15237 15237 }
15238 15238
15239 15239 lun64 = (int64_t)lun;
15240 15240 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
15241 15241 *lun_dip, LUN64_PROP, lun64) !=
15242 15242 DDI_PROP_SUCCESS) {
15243 15243 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15244 15244 "property for target %d lun64 %d (LUN64_PROP)",
15245 15245 target, lun);
15246 15246 ndi_rtn = NDI_FAILURE;
15247 15247 goto phys_create_done;
15248 15248 }
15249 15249 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
15250 15250 *lun_dip, "compatible", compatible, ncompatible)
15251 15251 != DDI_PROP_SUCCESS) {
15252 15252 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15253 15253 "property for target %d lun %d (COMPATIBLE)",
15254 15254 target, lun);
15255 15255 ndi_rtn = NDI_FAILURE;
15256 15256 goto phys_create_done;
15257 15257 }
15258 15258
15259 15259 /*
15260 15260 * We need the SAS WWN for non-multipath devices, so
15261 15261 * we'll use the same property as that multipathing
15262 15262 * devices need to present for MPAPI. If we don't have
15263 15263 * a WWN (e.g. parallel SCSI), don't create the prop.
15264 15264 */
15265 15265 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15266 15266 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15267 15267 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
15268 15268 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
15269 15269 != DDI_PROP_SUCCESS) {
15270 15270 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15271 15271 "create property for SAS target %d lun %d "
15272 15272 "(target-port)", target, lun);
15273 15273 ndi_rtn = NDI_FAILURE;
15274 15274 goto phys_create_done;
15275 15275 }
15276 15276
15277 15277 be_sas_wwn = BE_64(sas_wwn);
15278 15278 if (sas_wwn && ndi_prop_update_byte_array(
15279 15279 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
15280 15280 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
15281 15281 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15282 15282 "create property for SAS target %d lun %d "
15283 15283 "(port-wwn)", target, lun);
15284 15284 ndi_rtn = NDI_FAILURE;
15285 15285 goto phys_create_done;
15286 15286 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
15287 15287 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
15288 15288 DDI_PROP_SUCCESS)) {
15289 15289 /*
15290 15290 * Direct attached SATA device without DeviceName
15291 15291 */
15292 15292 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15293 15293 "create property for SAS target %d lun %d "
15294 15294 "(sata-phy)", target, lun);
15295 15295 ndi_rtn = NDI_FAILURE;
15296 15296 goto phys_create_done;
15297 15297 }
15298 15298
15299 15299 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15300 15300 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
15301 15301 mptsas_log(mpt, CE_WARN, "mptsas unable to"
15302 15302 "create property for SAS target %d lun %d"
15303 15303 " (SAS_PROP)", target, lun);
15304 15304 ndi_rtn = NDI_FAILURE;
15305 15305 goto phys_create_done;
15306 15306 }
15307 15307 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
15308 15308 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
15309 15309 mptsas_log(mpt, CE_WARN, "mptsas unable "
15310 15310 "to create guid property for target %d "
15311 15311 "lun %d", target, lun);
15312 15312 ndi_rtn = NDI_FAILURE;
15313 15313 goto phys_create_done;
15314 15314 }
15315 15315
15316 15316 /*
15317 15317 * The following code is to set properties for SM-HBA support,
15318 15318 * it doesn't apply to RAID volumes
15319 15319 */
15320 15320 if (ptgt->m_addr.mta_phymask == 0)
15321 15321 goto phys_raid_lun;
15322 15322
15323 15323 mutex_enter(&mpt->m_mutex);
15324 15324
15325 15325 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15326 15326 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15327 15327 (uint32_t)ptgt->m_devhdl;
15328 15328 rval = mptsas_get_sas_device_page0(mpt, page_address,
15329 15329 &dev_hdl, &dev_sas_wwn, &dev_info,
15330 15330 &physport, &phy_id, &pdev_hdl,
15331 15331 &bay_num, &enclosure, &io_flags);
15332 15332 if (rval != DDI_SUCCESS) {
15333 15333 mutex_exit(&mpt->m_mutex);
15334 15334 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15335 15335 "parent device for handle %d.", page_address);
15336 15336 ndi_rtn = NDI_FAILURE;
15337 15337 goto phys_create_done;
15338 15338 }
15339 15339
15340 15340 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15341 15341 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15342 15342 rval = mptsas_get_sas_device_page0(mpt, page_address,
15343 15343 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15344 15344 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15345 15345 if (rval != DDI_SUCCESS) {
15346 15346 mutex_exit(&mpt->m_mutex);
15347 15347 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15348 15348 "device for handle %d.", page_address);
15349 15349 ndi_rtn = NDI_FAILURE;
15350 15350 goto phys_create_done;
15351 15351 }
15352 15352
15353 15353 mutex_exit(&mpt->m_mutex);
15354 15354
15355 15355 /*
15356 15356 * If this device direct attached to the controller
15357 15357 * set the attached-port to the base wwid
15358 15358 */
15359 15359 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15360 15360 != DEVINFO_DIRECT_ATTACHED) {
15361 15361 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15362 15362 pdev_sas_wwn);
15363 15363 } else {
15364 15364 /*
15365 15365 * Update the iport's attached-port to guid
15366 15366 */
15367 15367 if (sas_wwn == 0) {
15368 15368 (void) sprintf(wwn_str, "p%x", phy);
15369 15369 } else {
15370 15370 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15371 15371 }
15372 15372 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15373 15373 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15374 15374 DDI_PROP_SUCCESS) {
15375 15375 mptsas_log(mpt, CE_WARN,
15376 15376 "mptsas unable to create "
15377 15377 "property for iport target-port"
15378 15378 " %s (sas_wwn)",
15379 15379 wwn_str);
15380 15380 ndi_rtn = NDI_FAILURE;
15381 15381 goto phys_create_done;
15382 15382 }
15383 15383
15384 15384 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15385 15385 mpt->un.m_base_wwid);
15386 15386 }
15387 15387
15388 15388 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15389 15389 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15390 15390 DDI_PROP_SUCCESS) {
15391 15391 mptsas_log(mpt, CE_WARN,
15392 15392 "mptsas unable to create "
15393 15393 "property for iport attached-port %s (sas_wwn)",
15394 15394 attached_wwn_str);
15395 15395 ndi_rtn = NDI_FAILURE;
15396 15396 goto phys_create_done;
15397 15397 }
15398 15398
15399 15399 if (IS_SATA_DEVICE(dev_info)) {
15400 15400 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15401 15401 *lun_dip, MPTSAS_VARIANT, "sata") !=
15402 15402 DDI_PROP_SUCCESS) {
15403 15403 mptsas_log(mpt, CE_WARN,
15404 15404 "mptsas unable to create "
15405 15405 "property for device variant ");
15406 15406 ndi_rtn = NDI_FAILURE;
15407 15407 goto phys_create_done;
15408 15408 }
15409 15409 }
15410 15410
15411 15411 if (IS_ATAPI_DEVICE(dev_info)) {
15412 15412 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15413 15413 *lun_dip, MPTSAS_VARIANT, "atapi") !=
15414 15414 DDI_PROP_SUCCESS) {
15415 15415 mptsas_log(mpt, CE_WARN,
15416 15416 "mptsas unable to create "
15417 15417 "property for device variant ");
15418 15418 ndi_rtn = NDI_FAILURE;
15419 15419 goto phys_create_done;
15420 15420 }
15421 15421 }
15422 15422
15423 15423 phys_raid_lun:
15424 15424 /*
15425 15425 * if this is a SAS controller, and the target is a SATA
15426 15426 * drive, set the 'pm-capable' property for sd and if on
15427 15427 * an OPL platform, also check if this is an ATAPI
15428 15428 * device.
15429 15429 */
15430 15430 instance = ddi_get_instance(mpt->m_dip);
15431 15431 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15432 15432 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15433 15433 NDBG2(("mptsas%d: creating pm-capable property, "
15434 15434 "target %d", instance, target));
15435 15435
15436 15436 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
15437 15437 *lun_dip, "pm-capable", 1)) !=
15438 15438 DDI_PROP_SUCCESS) {
15439 15439 mptsas_log(mpt, CE_WARN, "mptsas "
15440 15440 "failed to create pm-capable "
15441 15441 "property, target %d", target);
15442 15442 ndi_rtn = NDI_FAILURE;
15443 15443 goto phys_create_done;
15444 15444 }
15445 15445
15446 15446 }
15447 15447
15448 15448 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
15449 15449 /*
15450 15450 * add 'obp-path' properties for devinfo
15451 15451 */
15452 15452 bzero(wwn_str, sizeof (wwn_str));
15453 15453 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15454 15454 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15455 15455 if (guid) {
15456 15456 (void) snprintf(component, MAXPATHLEN,
15457 15457 "disk@w%s,%x", wwn_str, lun);
15458 15458 } else {
15459 15459 (void) snprintf(component, MAXPATHLEN,
15460 15460 "disk@p%x,%x", phy, lun);
15461 15461 }
15462 15462 if (ddi_pathname_obp_set(*lun_dip, component)
15463 15463 != DDI_SUCCESS) {
15464 15464 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15465 15465 "unable to set obp-path for SAS "
15466 15466 "object %s", component);
15467 15467 ndi_rtn = NDI_FAILURE;
15468 15468 goto phys_create_done;
15469 15469 }
15470 15470 }
15471 15471 /*
15472 15472 * Create the phy-num property for non-raid disk
15473 15473 */
15474 15474 if (ptgt->m_addr.mta_phymask != 0) {
15475 15475 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15476 15476 *lun_dip, "phy-num", ptgt->m_phynum) !=
15477 15477 DDI_PROP_SUCCESS) {
15478 15478 mptsas_log(mpt, CE_WARN, "mptsas driver "
15479 15479 "failed to create phy-num property for "
15480 15480 "target %d", target);
15481 15481 ndi_rtn = NDI_FAILURE;
15482 15482 goto phys_create_done;
15483 15483 }
15484 15484 }
15485 15485 phys_create_done:
15486 15486 /*
15487 15487 * If props were setup ok, online the lun
15488 15488 */
15489 15489 if (ndi_rtn == NDI_SUCCESS) {
15490 15490 /*
15491 15491 * Try to online the new node
15492 15492 */
15493 15493 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
15494 15494 }
15495 15495 if (ndi_rtn == NDI_SUCCESS) {
15496 15496 mutex_enter(&mpt->m_mutex);
15497 15497 ptgt->m_led_status = 0;
15498 15498 (void) mptsas_flush_led_status(mpt, ptgt);
15499 15499 mutex_exit(&mpt->m_mutex);
15500 15500 }
15501 15501
15502 15502 /*
15503 15503 * If success set rtn flag, else unwire alloc'd lun
15504 15504 */
15505 15505 if (ndi_rtn != NDI_SUCCESS) {
15506 15506 NDBG12(("mptsas driver unable to online "
15507 15507 "target %d lun %d", target, lun));
15508 15508 ndi_prop_remove_all(*lun_dip);
15509 15509 (void) ndi_devi_free(*lun_dip);
15510 15510 *lun_dip = NULL;
15511 15511 }
15512 15512 }
15513 15513
15514 15514 scsi_hba_nodename_compatible_free(nodename, compatible);
15515 15515
15516 15516 if (wwn_str != NULL) {
15517 15517 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15518 15518 }
15519 15519 if (component != NULL) {
15520 15520 kmem_free(component, MAXPATHLEN);
15521 15521 }
15522 15522
15523 15523
15524 15524 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15525 15525 }
15526 15526
15527 15527 static int
15528 15528 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
15529 15529 {
15530 15530 mptsas_t *mpt = DIP2MPT(pdip);
15531 15531 struct smp_device smp_sd;
15532 15532
15533 15533 /* XXX An HBA driver should not be allocating an smp_device. */
15534 15534 bzero(&smp_sd, sizeof (struct smp_device));
15535 15535 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
15536 15536 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
15537 15537
15538 15538 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
15539 15539 return (NDI_FAILURE);
15540 15540 return (NDI_SUCCESS);
15541 15541 }
15542 15542
15543 15543 static int
15544 15544 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
15545 15545 {
15546 15546 mptsas_t *mpt = DIP2MPT(pdip);
15547 15547 mptsas_smp_t *psmp = NULL;
15548 15548 int rval;
15549 15549 int phymask;
15550 15550
15551 15551 /*
15552 15552 * Get the physical port associated to the iport
15553 15553 * PHYMASK TODO
15554 15554 */
15555 15555 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
15556 15556 "phymask", 0);
15557 15557 /*
15558 15558 * Find the smp node in hash table with specified sas address and
15559 15559 * physical port
15560 15560 */
15561 15561 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
15562 15562 if (psmp == NULL) {
15563 15563 return (DDI_FAILURE);
15564 15564 }
15565 15565
15566 15566 rval = mptsas_online_smp(pdip, psmp, smp_dip);
15567 15567
15568 15568 return (rval);
15569 15569 }
15570 15570
/*
 * Create and online an smp(7D) child node under the iport pdip for the
 * expander described by smp_node.  If the child already exists it is
 * simply returned through *smp_dip.  Otherwise a new node is allocated,
 * its identity properties (smp-wwn, target-port, attached-port) are set
 * from expander/device page 0 data fetched from the IOC, and the node is
 * onlined.  For a directly-attached expander on a legacy iport the
 * iport's SMHBA properties (num-phys, attached-port, per-phy props) are
 * refreshed as well.  Returns DDI_SUCCESS or DDI_FAILURE; on failure the
 * partially constructed node is freed.
 */
static int
mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
    dev_info_t **smp_dip)
{
	char wwn_str[MPTSAS_WWN_STRLEN];
	char attached_wwn_str[MPTSAS_WWN_STRLEN];
	int ndi_rtn = NDI_FAILURE;
	int rval = 0;
	mptsas_smp_t dev_info;
	uint32_t page_address;
	mptsas_t *mpt = DIP2MPT(pdip);
	uint16_t dev_hdl;
	uint64_t sas_wwn;
	uint64_t smp_sas_wwn;
	uint8_t physport;
	uint8_t phy_id;
	uint16_t pdev_hdl;
	uint8_t numphys = 0;
	uint16_t i = 0;
	char phymask[MPTSAS_MAX_PHYS];
	char *iport = NULL;
	mptsas_phymask_t phy_mask = 0;
	uint16_t attached_devhdl;
	uint16_t bay_num, enclosure, io_flags;

	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);

	/*
	 * Probe smp device, prevent the node of removed device from being
	 * configured successfully
	 */
	if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Node already exists for this WWN; nothing to create. */
	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
		return (DDI_SUCCESS);
	}

	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {
		/*
		 * Set the flavor of the child to be SMP flavored
		 */
		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SMP_WWN, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp device %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		/* Re-format with the "w" unit-address prefix for target-port. */
		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for iport target-port %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);

		/*
		 * Fetch expander page 0 to learn this expander's parent
		 * device handle.
		 */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &dev_info);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to get expander "
			    "parent device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Cache parent handle and fetch the parent's page 0. */
		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_pdevhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo, &physport,
		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* And the expander's own device page 0, for device info. */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure,
		    &io_flags);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		/*
		 * If this smp direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    sas_wwn);
		} else {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SMP %s (SMP_PROP) ",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/*
		 * check the smp to see whether it direct
		 * attached to the controller
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			goto smp_create_done;
		}
		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
		if (numphys > 0) {
			goto smp_create_done;
		}
		/*
		 * this iport is an old iport, we need to
		 * reconfig the props for it.
		 */
		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_VIRTUAL_PORT, 0) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			goto smp_create_done;
		}

		/*
		 * Match this iport's unit address against the per-phy
		 * masks to recover its phy mask, then count the phys.
		 */
		mutex_enter(&mpt->m_mutex);
		numphys = 0;
		iport = ddi_get_name_addr(pdip);
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			bzero(phymask, sizeof (phymask));
			(void) sprintf(phymask,
			    "%x", mpt->m_phy_info[i].phy_mask);
			if (strcmp(phymask, iport) == 0) {
				phy_mask = mpt->m_phy_info[i].phy_mask;
				break;
			}
		}

		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			if ((phy_mask >> i) & 0x01) {
				numphys++;
			}
		}
		/*
		 * Update PHY info for smhba
		 */
		if (mptsas_smhba_phy_init(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas phy update "
			    "failed");
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
		    &attached_devhdl);

		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_NUM_PHYS, numphys) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_NUM_PHYS);
			mptsas_log(mpt, CE_WARN, "mptsas update "
			    "num phys props failed");
			goto smp_create_done;
		}
		/*
		 * Add parent's props for SMHBA support
		 */
		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    SCSI_ADDR_PROP_ATTACHED_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas update iport"
			    "attached-port failed");
			goto smp_create_done;
		}

smp_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas unable to online "
			    "SMP target %s", wwn_str));
			ndi_prop_remove_all(*smp_dip);
			/*
			 * NOTE(review): unlike the LUN creation path above,
			 * *smp_dip is not cleared after the free; callers
			 * must not use it when DDI_FAILURE is returned.
			 */
			(void) ndi_devi_free(*smp_dip);
		}
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
15824 15824
/*
 * smp transport routine: entry point for SMP frames from the smp(7D)
 * framework.  Builds an MPI2 SMP passthrough request from the smp_pkt,
 * issues it through mptsas_do_passthru(), and maps IOC/SAS status codes
 * onto smp_pkt_reason errno values.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t wwn;
	Mpi2SmpPassthroughRequest_t req;
	Mpi2SmpPassthroughReply_t rep;
	uint32_t direction = 0;
	mptsas_t *mpt;
	int ret;
	uint64_t tmp64;

	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is a 16-bit field; reject oversized requests. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * Lengths handed to the IOC are 4 bytes shorter than the packet
	 * sizes (presumably excluding the SMP CRC — confirm against the
	 * MPI2 specification).
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* Map the IOC status onto an errno for the SMP framework. */
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
15909 15909
15910 15910 /*
15911 15911 * If we didn't get a match, we need to get sas page0 for each device, and
15912 15912 * untill we get a match. If failed, return NULL
15913 15913 */
/*
 * Translate a phy number (on a narrow, directly-attached port with the
 * given phymask) into its target structure.  First searches the target
 * hash; if the target has not yet been discovered and firmware traversal
 * is incomplete, walks the IOC's device handles (resuming from the cached
 * m_dev_handle) until a WWN-less target with the matching phy number is
 * found.  Returns NULL when no such target exists.
 */
static mptsas_target_t *
mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
{
	int i, j = 0;
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *ptgt = NULL;

	/*
	 * PHY named device must be direct attached and attaches to
	 * narrow port, if the iport is not parent of the device which
	 * we are looking for.
	 */
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		if ((1 << i) & phymask)
			j++;
	}

	/* More than one phy in the mask: not a narrow port, so no match. */
	if (j > 1)
		return (NULL);

	/*
	 * Must be a narrow port and single device attached to the narrow port
	 * So the physical port num of device which is equal to the iport's
	 * port num is the device what we are looking for.
	 */

	if (mpt->m_phy_info[phy].phy_mask != phymask)
		return (NULL);

	mutex_enter(&mpt->m_mutex);

	/* Fast path: the target may already be in the hash. */
	ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
	    &phy);
	if (ptgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (ptgt);
	}

	/* All handles already traversed; a miss is definitive. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		ptgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember where to resume the traversal next time. */
		mpt->m_dev_handle = cur_handle;

		/* Phy-addressed targets have no WWN; match on phy number. */
		if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (ptgt);
}
15985 15985
15986 15986 /*
15987 15987 * The ptgt->m_addr.mta_wwn contains the wwid for each disk.
15988 15988 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
15989 15989 * If we didn't get a match, we need to get sas page0 for each device, and
15990 15990 * untill we get a match
15991 15991 * If failed, return NULL
15992 15992 */
/*
 * Translate a (phymask, wwid) pair into its target structure.  A phymask
 * of 0 denotes an IR (RAID) volume, which is resolved by refreshing the
 * RAID configuration.  Physical targets are first looked up in the target
 * hash; if discovery is incomplete the IOC's device handles are walked
 * (resuming from the cached m_dev_handle) until a matching target is
 * found.  Returns NULL when no match exists.
 */
static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *tmp_tgt = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	/* Fast path: already known in the target hash. */
	tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
	if (tmp_tgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	if (phymask == 0) {
		/*
		 * It's IR volume
		 */
		rval = mptsas_get_raid_info(mpt);
		if (rval) {
			/* RAID info refreshed; the volume may now be hashed. */
			tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
		}
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	/* All handles already traversed; a miss is definitive. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;
	for (;;) {
		tmp_tgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &tmp_tgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			tmp_tgt = NULL;
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember where to resume the traversal next time. */
		mpt->m_dev_handle = cur_handle;
		if ((tmp_tgt->m_addr.mta_wwn) &&
		    (tmp_tgt->m_addr.mta_wwn == wwid) &&
		    (tmp_tgt->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (tmp_tgt);
}
16056 16056
/*
 * Translate a (phymask, wwid) pair into its SMP (expander) target
 * structure.  First looks in the SMP target hash; if expander discovery
 * is incomplete, walks the IOC's expander handles (resuming from the
 * cached m_smp_devhdl), hashing each one found, until a match turns up.
 * Returns NULL when no match exists.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_smp_t smp_node, *psmp = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	/* Fast path: already known in the SMP target hash. */
	psmp = refhash_lookup(mpt->m_smp_targets, &addr);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* All expander handles already traversed; a miss is definitive. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_smp_devhdl;
	for (;;) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			/* No more expanders; psmp is NULL here. */
			break;
		}
		/* Advance the traversal cursor and hash this expander. */
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		psmp = mptsas_smp_alloc(mpt, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
		    (psmp->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
16103 16103
16104 16104 mptsas_target_t *
16105 16105 mptsas_tgt_alloc(mptsas_t *mpt, uint16_t devhdl, uint64_t wwid,
16106 16106 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
16107 16107 {
16108 16108 mptsas_target_t *tmp_tgt = NULL;
16109 16109 mptsas_target_addr_t addr;
16110 16110
16111 16111 addr.mta_wwn = wwid;
16112 16112 addr.mta_phymask = phymask;
16113 16113 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16114 16114 if (tmp_tgt != NULL) {
16115 16115 NDBG20(("Hash item already exist"));
16116 16116 tmp_tgt->m_deviceinfo = devinfo;
16117 16117 tmp_tgt->m_devhdl = devhdl; /* XXX - duplicate? */
16118 16118 return (tmp_tgt);
16119 16119 }
16120 16120 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
16121 16121 if (tmp_tgt == NULL) {
16122 16122 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
16123 16123 return (NULL);
16124 16124 }
16125 16125 tmp_tgt->m_devhdl = devhdl;
16126 16126 tmp_tgt->m_addr.mta_wwn = wwid;
16127 16127 tmp_tgt->m_deviceinfo = devinfo;
16128 16128 tmp_tgt->m_addr.mta_phymask = phymask;
16129 16129 tmp_tgt->m_phynum = phynum;
16130 16130 /* Initialized the tgt structure */
16131 16131 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
16132 16132 tmp_tgt->m_qfull_retry_interval =
16133 16133 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
16134 16134 tmp_tgt->m_t_throttle = MAX_THROTTLE;
16135 16135 TAILQ_INIT(&tmp_tgt->m_active_cmdq);
16136 16136
16137 16137 refhash_insert(mpt->m_targets, tmp_tgt);
16138 16138
16139 16139 return (tmp_tgt);
16140 16140 }
16141 16141
16142 16142 static void
16143 16143 mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
16144 16144 {
16145 16145 dst->m_devhdl = src->m_devhdl;
16146 16146 dst->m_deviceinfo = src->m_deviceinfo;
16147 16147 dst->m_pdevhdl = src->m_pdevhdl;
16148 16148 dst->m_pdevinfo = src->m_pdevinfo;
16149 16149 }
16150 16150
16151 16151 static mptsas_smp_t *
16152 16152 mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
16153 16153 {
16154 16154 mptsas_target_addr_t addr;
16155 16155 mptsas_smp_t *ret_data;
16156 16156
16157 16157 addr.mta_wwn = data->m_addr.mta_wwn;
16158 16158 addr.mta_phymask = data->m_addr.mta_phymask;
16159 16159 ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
16160 16160 /*
16161 16161 * If there's already a matching SMP target, update its fields
16162 16162 * in place. Since the address is not changing, it's safe to do
16163 16163 * this. We cannot just bcopy() here because the structure we've
16164 16164 * been given has invalid hash links.
16165 16165 */
16166 16166 if (ret_data != NULL) {
16167 16167 mptsas_smp_target_copy(data, ret_data);
16168 16168 return (ret_data);
16169 16169 }
16170 16170
16171 16171 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
16172 16172 bcopy(data, ret_data, sizeof (mptsas_smp_t));
16173 16173 refhash_insert(mpt->m_smp_targets, ret_data);
16174 16174 return (ret_data);
16175 16175 }
16176 16176
16177 16177 /*
16178 16178 * Functions for SGPIO LED support
16179 16179 */
16180 16180 static dev_info_t *
16181 16181 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
16182 16182 {
16183 16183 dev_info_t *dip;
16184 16184 int prop;
16185 16185 dip = e_ddi_hold_devi_by_dev(dev, 0);
16186 16186 if (dip == NULL)
16187 16187 return (dip);
16188 16188 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
16189 16189 "phymask", 0);
16190 16190 *phymask = (mptsas_phymask_t)prop;
16191 16191 ddi_release_devi(dip);
16192 16192 return (dip);
16193 16193 }
16194 16194 static mptsas_target_t *
16195 16195 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16196 16196 {
16197 16197 uint8_t phynum;
16198 16198 uint64_t wwn;
16199 16199 int lun;
16200 16200 mptsas_target_t *ptgt = NULL;
16201 16201
16202 16202 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16203 16203 return (NULL);
16204 16204 }
16205 16205 if (addr[0] == 'w') {
16206 16206 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16207 16207 } else {
16208 16208 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16209 16209 }
16210 16210 return (ptgt);
16211 16211 }
16212 16212
16213 16213 static int
16214 16214 mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt)
16215 16215 {
16216 16216 uint32_t slotstatus = 0;
16217 16217
16218 16218 /* Build an MPI2 Slot Status based on our view of the world */
16219 16219 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16220 16220 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16221 16221 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16222 16222 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16223 16223 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16224 16224 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
16225 16225
16226 16226 /* Write it to the controller */
16227 16227 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16228 16228 slotstatus, ptgt->m_slot_num));
16229 16229 return (mptsas_send_sep(mpt, ptgt, &slotstatus,
16230 16230 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16231 16231 }
16232 16232
16233 16233 /*
16234 16234 * send sep request, use enclosure/slot addressing
16235 16235 */
16236 16236 static int
16237 16237 mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
16238 16238 uint32_t *status, uint8_t act)
16239 16239 {
16240 16240 Mpi2SepRequest_t req;
16241 16241 Mpi2SepReply_t rep;
16242 16242 int ret;
16243 16243
16244 16244 ASSERT(mutex_owned(&mpt->m_mutex));
16245 16245
16246 16246 /*
16247 16247 * We only support SEP control of directly-attached targets, in which
16248 16248 * case the "SEP" we're talking to is a virtual one contained within
16249 16249 * the HBA itself. This is necessary because DA targets typically have
16250 16250 * no other mechanism for LED control. Targets for which a separate
16251 16251 * enclosure service processor exists should be controlled via ses(7d)
16252 16252 * or sgen(7d). Furthermore, since such requests can time out, they
16253 16253 * should be made in user context rather than in response to
16254 16254 * asynchronous fabric changes.
16255 16255 *
16256 16256 * In addition, we do not support this operation for RAID volumes,
16257 16257 * since there is no slot associated with them.
16258 16258 */
16259 16259 if (!(ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED) ||
16260 16260 ptgt->m_addr.mta_phymask == 0) {
16261 16261 return (ENOTTY);
16262 16262 }
16263 16263
16264 16264 bzero(&req, sizeof (req));
16265 16265 bzero(&rep, sizeof (rep));
16266 16266
16267 16267 req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
16268 16268 req.Action = act;
16269 16269 req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
16270 16270 req.EnclosureHandle = LE_16(ptgt->m_enclosure);
16271 16271 req.Slot = LE_16(ptgt->m_slot_num);
16272 16272 if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16273 16273 req.SlotStatus = LE_32(*status);
16274 16274 }
16275 16275 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
16276 16276 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
16277 16277 if (ret != 0) {
16278 16278 mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
16279 16279 "Processor Request message error %d", ret);
16280 16280 return (ret);
16281 16281 }
16282 16282 /* do passthrough success, check the ioc status */
16283 16283 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
16284 16284 mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
16285 16285 "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
16286 16286 LE_32(rep.IOCLogInfo));
16287 16287 switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
16288 16288 case MPI2_IOCSTATUS_INVALID_FUNCTION:
16289 16289 case MPI2_IOCSTATUS_INVALID_VPID:
16290 16290 case MPI2_IOCSTATUS_INVALID_FIELD:
16291 16291 case MPI2_IOCSTATUS_INVALID_STATE:
16292 16292 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
16293 16293 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
16294 16294 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
16295 16295 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
16296 16296 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
16297 16297 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
16298 16298 return (EINVAL);
16299 16299 case MPI2_IOCSTATUS_BUSY:
16300 16300 return (EBUSY);
16301 16301 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
16302 16302 return (EAGAIN);
16303 16303 case MPI2_IOCSTATUS_INVALID_SGL:
16304 16304 case MPI2_IOCSTATUS_INTERNAL_ERROR:
16305 16305 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
16306 16306 default:
16307 16307 return (EIO);
16308 16308 }
16309 16309 }
16310 16310 if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16311 16311 *status = LE_32(rep.SlotStatus);
16312 16312 }
16313 16313
16314 16314 return (0);
16315 16315 }
16316 16316
16317 16317 int
16318 16318 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
16319 16319 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
16320 16320 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
16321 16321 {
16322 16322 ddi_dma_cookie_t new_cookie;
16323 16323 size_t alloc_len;
16324 16324 uint_t ncookie;
16325 16325
16326 16326 if (cookiep == NULL)
16327 16327 cookiep = &new_cookie;
16328 16328
16329 16329 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
16330 16330 NULL, dma_hdp) != DDI_SUCCESS) {
16331 16331 return (FALSE);
16332 16332 }
16333 16333
16334 16334 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
16335 16335 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
16336 16336 acc_hdp) != DDI_SUCCESS) {
16337 16337 ddi_dma_free_handle(dma_hdp);
16338 16338 return (FALSE);
16339 16339 }
16340 16340
16341 16341 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
16342 16342 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
16343 16343 cookiep, &ncookie) != DDI_DMA_MAPPED) {
16344 16344 (void) ddi_dma_mem_free(acc_hdp);
16345 16345 ddi_dma_free_handle(dma_hdp);
16346 16346 return (FALSE);
16347 16347 }
16348 16348
16349 16349 return (TRUE);
16350 16350 }
16351 16351
16352 16352 void
16353 16353 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
16354 16354 {
16355 16355 if (*dma_hdp == NULL)
16356 16356 return;
16357 16357
16358 16358 (void) ddi_dma_unbind_handle(*dma_hdp);
16359 16359 (void) ddi_dma_mem_free(acc_hdp);
16360 16360 ddi_dma_free_handle(dma_hdp);
16361 16361 }
↓ open down ↓ |
2028 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX