Print this page
9702 HBA drivers don't need the redundant devfs_clean step
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
+++ new/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
25 25 * Copyright (c) 2017, Joyent, Inc.
26 26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 27 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
28 28 */
29 29
30 30 /*
31 31 * Copyright (c) 2000 to 2010, LSI Corporation.
32 32 * All rights reserved.
33 33 *
34 34 * Redistribution and use in source and binary forms of all code within
35 35 * this file that is exclusively owned by LSI, with or without
36 36 * modification, is permitted provided that, in addition to the CDDL 1.0
37 37 * License requirements, the following conditions are met:
38 38 *
39 39 * Neither the name of the author nor the names of its contributors may be
40 40 * used to endorse or promote products derived from this software without
41 41 * specific prior written permission.
42 42 *
43 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
46 46 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
47 47 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
48 48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
49 49 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
50 50 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
51 51 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
52 52 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 54 * DAMAGE.
55 55 */
56 56
57 57 /*
58 58 * mptsas - This is a driver based on LSI Logic's MPT2.0/2.5 interface.
59 59 *
60 60 */
61 61
62 62 #if defined(lint) || defined(DEBUG)
63 63 #define MPTSAS_DEBUG
64 64 #endif
65 65
66 66 /*
67 67 * standard header files.
68 68 */
69 69 #include <sys/note.h>
70 70 #include <sys/scsi/scsi.h>
71 71 #include <sys/pci.h>
72 72 #include <sys/file.h>
73 73 #include <sys/policy.h>
74 74 #include <sys/model.h>
75 75 #include <sys/sysevent.h>
76 76 #include <sys/sysevent/eventdefs.h>
77 77 #include <sys/sysevent/dr.h>
78 78 #include <sys/sata/sata_defs.h>
79 79 #include <sys/sata/sata_hba.h>
80 80 #include <sys/scsi/generic/sas.h>
81 81 #include <sys/scsi/impl/scsi_sas.h>
82 82
83 83 #pragma pack(1)
84 84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
85 85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
86 86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
87 87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
88 88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
89 89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
90 90 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
91 91 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
92 92 #pragma pack()
93 93
94 94 /*
↓ open down ↓ |
94 lines elided |
↑ open up ↑ |
95 95 * private header files.
96 96 *
97 97 */
98 98 #include <sys/scsi/impl/scsi_reset_notify.h>
99 99 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
100 100 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
101 101 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
102 102 #include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
103 103 #include <sys/raidioctl.h>
104 104
105 -#include <sys/fs/dv_node.h> /* devfs_clean */
106 -
107 105 /*
108 106 * FMA header files
109 107 */
110 108 #include <sys/ddifm.h>
111 109 #include <sys/fm/protocol.h>
112 110 #include <sys/fm/util.h>
113 111 #include <sys/fm/io/ddi.h>
114 112
115 113 /*
116 114 * autoconfiguration data and routines.
117 115 */
118 116 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
119 117 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
120 118 static int mptsas_power(dev_info_t *dip, int component, int level);
121 119
122 120 /*
123 121 * cb_ops function
124 122 */
125 123 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
126 124 cred_t *credp, int *rval);
127 125 #ifdef __sparc
128 126 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
129 127 #else /* __sparc */
130 128 static int mptsas_quiesce(dev_info_t *devi);
131 129 #endif /* __sparc */
132 130
133 131 /*
134 132 * Resource initialization for hardware
135 133 */
136 134 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
137 135 static void mptsas_disable_bus_master(mptsas_t *mpt);
138 136 static void mptsas_hba_fini(mptsas_t *mpt);
139 137 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
140 138 static int mptsas_hba_setup(mptsas_t *mpt);
141 139 static void mptsas_hba_teardown(mptsas_t *mpt);
142 140 static int mptsas_config_space_init(mptsas_t *mpt);
143 141 static void mptsas_config_space_fini(mptsas_t *mpt);
144 142 static void mptsas_iport_register(mptsas_t *mpt);
145 143 static int mptsas_smp_setup(mptsas_t *mpt);
146 144 static void mptsas_smp_teardown(mptsas_t *mpt);
147 145 static int mptsas_enc_setup(mptsas_t *mpt);
148 146 static void mptsas_enc_teardown(mptsas_t *mpt);
149 147 static int mptsas_cache_create(mptsas_t *mpt);
150 148 static void mptsas_cache_destroy(mptsas_t *mpt);
151 149 static int mptsas_alloc_request_frames(mptsas_t *mpt);
152 150 static int mptsas_alloc_sense_bufs(mptsas_t *mpt);
153 151 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
154 152 static int mptsas_alloc_free_queue(mptsas_t *mpt);
155 153 static int mptsas_alloc_post_queue(mptsas_t *mpt);
156 154 static void mptsas_alloc_reply_args(mptsas_t *mpt);
157 155 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
158 156 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
159 157 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
160 158 static void mptsas_update_hashtab(mptsas_t *mpt);
161 159
162 160 /*
163 161 * SCSA function prototypes
164 162 */
165 163 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
166 164 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
167 165 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
168 166 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
169 167 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
170 168 int tgtonly);
171 169 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
172 170 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
173 171 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
174 172 int tgtlen, int flags, int (*callback)(), caddr_t arg);
175 173 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
176 174 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
177 175 struct scsi_pkt *pkt);
178 176 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
179 177 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
180 178 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
181 179 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
182 180 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
183 181 void (*callback)(caddr_t), caddr_t arg);
184 182 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
185 183 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
186 184 static int mptsas_scsi_quiesce(dev_info_t *dip);
187 185 static int mptsas_scsi_unquiesce(dev_info_t *dip);
188 186 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
189 187 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
190 188
191 189 /*
192 190 * SMP functions
193 191 */
194 192 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
195 193
196 194 /*
197 195 * internal function prototypes.
198 196 */
199 197 static void mptsas_list_add(mptsas_t *mpt);
200 198 static void mptsas_list_del(mptsas_t *mpt);
201 199
202 200 static int mptsas_quiesce_bus(mptsas_t *mpt);
203 201 static int mptsas_unquiesce_bus(mptsas_t *mpt);
204 202
205 203 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
206 204 static void mptsas_free_handshake_msg(mptsas_t *mpt);
207 205
208 206 static void mptsas_ncmds_checkdrain(void *arg);
209 207
210 208 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
211 209 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
212 210 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
213 211 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
214 212
215 213 static int mptsas_do_detach(dev_info_t *dev);
216 214 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
217 215 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
218 216 struct scsi_pkt *pkt);
219 217 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
220 218
221 219 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
222 220 static void mptsas_handle_event(void *args);
223 221 static int mptsas_handle_event_sync(void *args);
224 222 static void mptsas_handle_dr(void *args);
225 223 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
226 224 dev_info_t *pdip);
227 225
228 226 static void mptsas_restart_cmd(void *);
229 227
230 228 static void mptsas_flush_hba(mptsas_t *mpt);
231 229 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
232 230 uint8_t tasktype);
233 231 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
234 232 uchar_t reason, uint_t stat);
235 233
236 234 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
237 235 static void mptsas_process_intr(mptsas_t *mpt,
238 236 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
239 237 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
240 238 pMpi2ReplyDescriptorsUnion_t reply_desc);
241 239 static void mptsas_handle_address_reply(mptsas_t *mpt,
242 240 pMpi2ReplyDescriptorsUnion_t reply_desc);
243 241 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
244 242 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
245 243 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
246 244
247 245 static void mptsas_watch(void *arg);
248 246 static void mptsas_watchsubr(mptsas_t *mpt);
249 247 static void mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt);
250 248
251 249 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
252 250 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
253 251 uint8_t *data, uint32_t request_size, uint32_t reply_size,
254 252 uint32_t data_size, uint32_t direction, uint8_t *dataout,
255 253 uint32_t dataout_size, short timeout, int mode);
256 254 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
257 255
258 256 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
259 257 uint32_t unique_id);
260 258 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
261 259 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
262 260 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
263 261 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
264 262 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
265 263 uint32_t diag_type);
266 264 static int mptsas_diag_register(mptsas_t *mpt,
267 265 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
268 266 static int mptsas_diag_unregister(mptsas_t *mpt,
269 267 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
270 268 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
271 269 uint32_t *return_code);
272 270 static int mptsas_diag_read_buffer(mptsas_t *mpt,
273 271 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
274 272 uint32_t *return_code, int ioctl_mode);
275 273 static int mptsas_diag_release(mptsas_t *mpt,
276 274 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
277 275 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
278 276 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
279 277 int ioctl_mode);
280 278 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
281 279 int mode);
282 280
283 281 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
284 282 int cmdlen, int tgtlen, int statuslen, int kf);
285 283 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
286 284
287 285 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
288 286 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
289 287
290 288 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
291 289 int kmflags);
292 290 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
293 291
294 292 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
295 293 mptsas_cmd_t *cmd);
296 294 static void mptsas_check_task_mgt(mptsas_t *mpt,
297 295 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
298 296 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
299 297 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
300 298 int *resid);
301 299
302 300 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
303 301 static void mptsas_free_active_slots(mptsas_t *mpt);
304 302 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
305 303
306 304 static void mptsas_restart_hba(mptsas_t *mpt);
307 305 static void mptsas_restart_waitq(mptsas_t *mpt);
308 306
309 307 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
310 308 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
311 309 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
312 310
313 311 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
314 312 static void mptsas_doneq_empty(mptsas_t *mpt);
315 313 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
316 314
317 315 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
318 316 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
319 317 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
320 318 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
321 319
322 320
323 321 static void mptsas_start_watch_reset_delay();
324 322 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
325 323 static void mptsas_watch_reset_delay(void *arg);
326 324 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
327 325
328 326 /*
329 327 * helper functions
330 328 */
331 329 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
332 330
333 331 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
334 332 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
335 333 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
336 334 int lun);
337 335 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
338 336 int lun);
339 337 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
340 338 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
341 339
342 340 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
343 341 int *lun);
344 342 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
345 343
346 344 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
347 345 mptsas_phymask_t phymask, uint8_t phy);
348 346 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
349 347 mptsas_phymask_t phymask, uint64_t wwid);
350 348 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
351 349 mptsas_phymask_t phymask, uint64_t wwid);
352 350
353 351 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
354 352 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
355 353
356 354 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
357 355 uint16_t *handle, mptsas_target_t **pptgt);
358 356 static void mptsas_update_phymask(mptsas_t *mpt);
359 357
360 358 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_enclosure_t *mep,
361 359 uint16_t idx);
362 360 static int mptsas_send_sep(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx,
363 361 uint32_t *status, uint8_t cmd);
364 362 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
365 363 mptsas_phymask_t *phymask);
366 364 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
367 365 mptsas_phymask_t phymask);
368 366
369 367
370 368 /*
371 369 * Enumeration / DR functions
372 370 */
373 371 static void mptsas_config_all(dev_info_t *pdip);
374 372 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
375 373 dev_info_t **lundip);
376 374 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
377 375 dev_info_t **lundip);
378 376
379 377 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
380 378 static int mptsas_offline_target(dev_info_t *pdip, char *name);
381 379
382 380 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
383 381 dev_info_t **dip);
384 382
385 383 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
386 384 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
387 385 dev_info_t **dip, mptsas_target_t *ptgt);
388 386
389 387 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
↓ open down ↓ |
273 lines elided |
↑ open up ↑ |
390 388 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
391 389
392 390 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
393 391 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
394 392 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
395 393 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
396 394 int lun);
397 395
398 396 static void mptsas_offline_missed_luns(dev_info_t *pdip,
399 397 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
400 -static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
401 - mdi_pathinfo_t *rpip, uint_t flags);
398 +static int mptsas_offline_lun(dev_info_t *rdip, mdi_pathinfo_t *rpip);
402 399
403 400 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
404 401 dev_info_t **smp_dip);
405 -static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
406 - uint_t flags);
402 +static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node);
407 403
408 404 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
409 405 int mode, int *rval);
410 406 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
411 407 int mode, int *rval);
412 408 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
413 409 int mode, int *rval);
414 410 static void mptsas_record_event(void *args);
415 411 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
416 412 int mode);
417 413
418 414 mptsas_target_t *mptsas_tgt_alloc(refhash_t *, uint16_t, uint64_t,
419 415 uint32_t, mptsas_phymask_t, uint8_t);
420 416 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
421 417 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
422 418 dev_info_t **smp_dip);
423 419
424 420 /*
425 421 * Power management functions
426 422 */
427 423 static int mptsas_get_pci_cap(mptsas_t *mpt);
428 424 static int mptsas_init_pm(mptsas_t *mpt);
429 425
430 426 /*
431 427 * MPT MSI tunable:
432 428 *
433 429 * By default MSI is enabled on all supported platforms.
434 430 */
435 431 boolean_t mptsas_enable_msi = B_TRUE;
436 432 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
437 433
438 434 /*
439 435 * Global switch for use of MPI2.5 FAST PATH.
440 436 * We don't really know what FAST PATH actually does, so if it is suspected
441 437 * to cause problems it can be turned off by setting this variable to B_FALSE.
442 438 */
443 439 boolean_t mptsas_use_fastpath = B_TRUE;
444 440
445 441 static int mptsas_register_intrs(mptsas_t *);
446 442 static void mptsas_unregister_intrs(mptsas_t *);
447 443 static int mptsas_add_intrs(mptsas_t *, int);
448 444 static void mptsas_rem_intrs(mptsas_t *);
449 445
450 446 /*
451 447 * FMA Prototypes
452 448 */
453 449 static void mptsas_fm_init(mptsas_t *mpt);
454 450 static void mptsas_fm_fini(mptsas_t *mpt);
455 451 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
456 452
457 453 extern pri_t minclsyspri, maxclsyspri;
458 454
459 455 /*
460 456 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
461 457 * under this device that the paths to a physical device are created when
462 458 * MPxIO is used.
463 459 */
464 460 extern dev_info_t *scsi_vhci_dip;
465 461
466 462 /*
467 463 * Tunable timeout value for Inquiry VPD page 0x83
468 464 * By default the value is 30 seconds.
469 465 */
470 466 int mptsas_inq83_retry_timeout = 30;
471 467
472 468 /*
473 469 * This is used to allocate memory for message frame storage, not for
474 470 * data I/O DMA. All message frames must be stored in the first 4G of
475 471 * physical memory.
476 472 */
477 473 ddi_dma_attr_t mptsas_dma_attrs = {
478 474 DMA_ATTR_V0, /* attribute layout version */
479 475 0x0ull, /* address low - should be 0 (longlong) */
480 476 0xffffffffull, /* address high - 32-bit max range */
481 477 0x00ffffffull, /* count max - max DMA object size */
482 478 4, /* allocation alignment requirements */
483 479 0x78, /* burstsizes - binary encoded values */
484 480 1, /* minxfer - gran. of DMA engine */
485 481 0x00ffffffull, /* maxxfer - gran. of DMA engine */
486 482 0xffffffffull, /* max segment size (DMA boundary) */
487 483 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
488 484 512, /* granularity - device transfer size */
489 485 0 /* flags, set to 0 */
490 486 };
491 487
492 488 /*
493 489 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
494 490 * physical addresses are supported.)
495 491 */
496 492 ddi_dma_attr_t mptsas_dma_attrs64 = {
497 493 DMA_ATTR_V0, /* attribute layout version */
498 494 0x0ull, /* address low - should be 0 (longlong) */
499 495 0xffffffffffffffffull, /* address high - 64-bit max */
500 496 0x00ffffffull, /* count max - max DMA object size */
501 497 4, /* allocation alignment requirements */
502 498 0x78, /* burstsizes - binary encoded values */
503 499 1, /* minxfer - gran. of DMA engine */
504 500 0x00ffffffull, /* maxxfer - gran. of DMA engine */
505 501 0xffffffffull, /* max segment size (DMA boundary) */
506 502 MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length */
507 503 512, /* granularity - device transfer size */
508 504 0 /* flags, set to 0 */
509 505 };
510 506
511 507 ddi_device_acc_attr_t mptsas_dev_attr = {
512 508 DDI_DEVICE_ATTR_V1,
513 509 DDI_STRUCTURE_LE_ACC,
514 510 DDI_STRICTORDER_ACC,
515 511 DDI_DEFAULT_ACC
516 512 };
517 513
518 514 static struct cb_ops mptsas_cb_ops = {
519 515 scsi_hba_open, /* open */
520 516 scsi_hba_close, /* close */
521 517 nodev, /* strategy */
522 518 nodev, /* print */
523 519 nodev, /* dump */
524 520 nodev, /* read */
525 521 nodev, /* write */
526 522 mptsas_ioctl, /* ioctl */
527 523 nodev, /* devmap */
528 524 nodev, /* mmap */
529 525 nodev, /* segmap */
530 526 nochpoll, /* chpoll */
531 527 ddi_prop_op, /* cb_prop_op */
532 528 NULL, /* streamtab */
533 529 D_MP, /* cb_flag */
534 530 CB_REV, /* rev */
535 531 nodev, /* aread */
536 532 nodev /* awrite */
537 533 };
538 534
539 535 static struct dev_ops mptsas_ops = {
540 536 DEVO_REV, /* devo_rev, */
541 537 0, /* refcnt */
542 538 ddi_no_info, /* info */
543 539 nulldev, /* identify */
544 540 nulldev, /* probe */
545 541 mptsas_attach, /* attach */
546 542 mptsas_detach, /* detach */
547 543 #ifdef __sparc
548 544 mptsas_reset,
549 545 #else
550 546 nodev, /* reset */
551 547 #endif /* __sparc */
552 548 &mptsas_cb_ops, /* driver operations */
553 549 NULL, /* bus operations */
554 550 mptsas_power, /* power management */
555 551 #ifdef __sparc
556 552 ddi_quiesce_not_needed
557 553 #else
558 554 mptsas_quiesce /* quiesce */
559 555 #endif /* __sparc */
560 556 };
561 557
562 558
563 559 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
564 560
565 561 static struct modldrv modldrv = {
566 562 &mod_driverops, /* Type of module. This one is a driver */
567 563 MPTSAS_MOD_STRING, /* Name of the module. */
568 564 &mptsas_ops, /* driver ops */
569 565 };
570 566
571 567 static struct modlinkage modlinkage = {
572 568 MODREV_1, &modldrv, NULL
573 569 };
574 570 #define TARGET_PROP "target"
575 571 #define LUN_PROP "lun"
576 572 #define LUN64_PROP "lun64"
577 573 #define SAS_PROP "sas-mpt"
578 574 #define MDI_GUID "wwn"
579 575 #define NDI_GUID "guid"
580 576 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
581 577
582 578 /*
583 579 * Local static data
584 580 */
585 581 #if defined(MPTSAS_DEBUG)
586 582 /*
587 583 * Flags to indicate which debug messages are to be printed and which go to the
588 584 * debug log ring buffer. Default is to not print anything, and to log
589 585 * everything except the watchsubr() output which normally happens every second.
590 586 */
591 587 uint32_t mptsas_debugprt_flags = 0x0;
592 588 uint32_t mptsas_debuglog_flags = ~(1U << 30);
593 589 #endif /* defined(MPTSAS_DEBUG) */
594 590 uint32_t mptsas_debug_resets = 0;
595 591
596 592 static kmutex_t mptsas_global_mutex;
597 593 static void *mptsas_state; /* soft state ptr */
598 594 static krwlock_t mptsas_global_rwlock;
599 595
600 596 static kmutex_t mptsas_log_mutex;
601 597 static char mptsas_log_buf[256];
602 598 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
603 599
604 600 static mptsas_t *mptsas_head, *mptsas_tail;
605 601 static clock_t mptsas_scsi_watchdog_tick;
606 602 static clock_t mptsas_tick;
607 603 static timeout_id_t mptsas_reset_watch;
608 604 static timeout_id_t mptsas_timeout_id;
609 605 static int mptsas_timeouts_enabled = 0;
610 606
611 607 /*
612 608 * Default length for extended auto request sense buffers.
613 609 * All sense buffers need to be under the same alloc because there
614 610 * is only one common top 32bits (of 64bits) address register.
615 611 * Most requests only require 32 bytes, but some request >256.
616 612 * We use rmalloc()/rmfree() on this additional memory to manage the
617 613 * "extended" requests.
618 614 */
619 615 int mptsas_extreq_sense_bufsize = 256*64;
620 616
621 617 /*
622 618 * We believe that all software restrictions of having to run with DMA
623 619 * attributes to limit allocation to the first 4G are removed.
624 620 * However, this flag remains to enable quick switchback should suspicious
625 621 * problems emerge.
626 622 * Note that scsi_alloc_consistent_buf() does still adhere to allocating
627 623 * 32 bit addressable memory, but we can cope if that is changed now.
628 624 */
629 625 int mptsas_use_64bit_msgaddr = 1;
630 626
631 627 /*
632 628 * warlock directives
633 629 */
634 630 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
635 631 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
636 632 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
637 633 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
638 634 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
639 635 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
640 636
641 637 /*
642 638 * SM - HBA statics
643 639 */
644 640 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
645 641
646 642 #ifdef MPTSAS_DEBUG
647 643 void debug_enter(char *);
648 644 #endif
649 645
650 646 /*
651 647 * Notes:
652 648 * - scsi_hba_init(9F) initializes SCSI HBA modules
653 649 * - must call scsi_hba_fini(9F) if modload() fails
654 650 */
655 651 int
656 652 _init(void)
657 653 {
658 654 int status;
659 655 /* CONSTCOND */
660 656 ASSERT(NO_COMPETING_THREADS);
661 657
662 658 NDBG0(("_init"));
663 659
664 660 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
665 661 MPTSAS_INITIAL_SOFT_SPACE);
666 662 if (status != 0) {
667 663 return (status);
668 664 }
669 665
670 666 if ((status = scsi_hba_init(&modlinkage)) != 0) {
671 667 ddi_soft_state_fini(&mptsas_state);
672 668 return (status);
673 669 }
674 670
675 671 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
676 672 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
677 673 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
678 674
679 675 if ((status = mod_install(&modlinkage)) != 0) {
680 676 mutex_destroy(&mptsas_log_mutex);
681 677 rw_destroy(&mptsas_global_rwlock);
682 678 mutex_destroy(&mptsas_global_mutex);
683 679 ddi_soft_state_fini(&mptsas_state);
684 680 scsi_hba_fini(&modlinkage);
685 681 }
686 682
687 683 return (status);
688 684 }
689 685
690 686 /*
691 687 * Notes:
692 688 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
693 689 */
694 690 int
695 691 _fini(void)
696 692 {
697 693 int status;
698 694 /* CONSTCOND */
699 695 ASSERT(NO_COMPETING_THREADS);
700 696
701 697 NDBG0(("_fini"));
702 698
703 699 if ((status = mod_remove(&modlinkage)) == 0) {
704 700 ddi_soft_state_fini(&mptsas_state);
705 701 scsi_hba_fini(&modlinkage);
706 702 mutex_destroy(&mptsas_global_mutex);
707 703 rw_destroy(&mptsas_global_rwlock);
708 704 mutex_destroy(&mptsas_log_mutex);
709 705 }
710 706 return (status);
711 707 }
712 708
713 709 /*
714 710 * The loadable-module _info(9E) entry point
715 711 */
716 712 int
717 713 _info(struct modinfo *modinfop)
718 714 {
719 715 /* CONSTCOND */
720 716 ASSERT(NO_COMPETING_THREADS);
721 717 NDBG0(("mptsas _info"));
722 718
723 719 return (mod_info(&modlinkage, modinfop));
724 720 }
725 721
726 722 static int
727 723 mptsas_target_eval_devhdl(const void *op, void *arg)
728 724 {
729 725 uint16_t dh = *(uint16_t *)arg;
730 726 const mptsas_target_t *tp = op;
731 727
732 728 return ((int)tp->m_devhdl - (int)dh);
733 729 }
734 730
735 731 static int
736 732 mptsas_target_eval_nowwn(const void *op, void *arg)
737 733 {
738 734 uint8_t phy = *(uint8_t *)arg;
739 735 const mptsas_target_t *tp = op;
740 736
741 737 if (tp->m_addr.mta_wwn != 0)
742 738 return (-1);
743 739
744 740 return ((int)tp->m_phynum - (int)phy);
745 741 }
746 742
747 743 static int
748 744 mptsas_smp_eval_devhdl(const void *op, void *arg)
749 745 {
750 746 uint16_t dh = *(uint16_t *)arg;
751 747 const mptsas_smp_t *sp = op;
752 748
753 749 return ((int)sp->m_devhdl - (int)dh);
754 750 }
755 751
756 752 static uint64_t
757 753 mptsas_target_addr_hash(const void *tp)
758 754 {
759 755 const mptsas_target_addr_t *tap = tp;
760 756
761 757 return ((tap->mta_wwn & 0xffffffffffffULL) |
762 758 ((uint64_t)tap->mta_phymask << 48));
763 759 }
764 760
765 761 static int
766 762 mptsas_target_addr_cmp(const void *a, const void *b)
767 763 {
768 764 const mptsas_target_addr_t *aap = a;
769 765 const mptsas_target_addr_t *bap = b;
770 766
771 767 if (aap->mta_wwn < bap->mta_wwn)
772 768 return (-1);
773 769 if (aap->mta_wwn > bap->mta_wwn)
774 770 return (1);
775 771 return ((int)bap->mta_phymask - (int)aap->mta_phymask);
776 772 }
777 773
/*
 * refhash hash callback for the temporary-target hash.  That hash keys
 * on the address of the target structure itself (tag offset 0 in the
 * refhash_create() call in mptsas_attach), so the hash value is simply
 * the pointer value widened to 64 bits.
 */
static uint64_t
mptsas_tmp_target_hash(const void *tp)
{
	return ((uint64_t)(uintptr_t)tp);
}
783 779
/*
 * refhash comparison callback for the temporary-target hash, whose key
 * is the address of the target structure itself (see the refhash_create
 * call in mptsas_attach).  Returns 0 iff a and b are the same pointer,
 * otherwise a consistent +1/-1 ordering.
 *
 * Fix: the second test was previously (b < a), which is the same
 * condition as (a > b) above, so -1 was unreachable and distinct
 * pointers with a < b compared as "equal" -- corrupting lookups on
 * hash-bucket collisions.  It now correctly tests (a < b).
 */
static int
mptsas_tmp_target_cmp(const void *a, const void *b)
{
	if (a > b)
		return (1);
	if (a < b)
		return (-1);

	return (0);
}
794 790
/*
 * refhash object destructor for target entries (registered via
 * refhash_create in mptsas_attach): frees the mptsas_target_t.
 */
static void
mptsas_target_free(void *op)
{
	kmem_free(op, sizeof (mptsas_target_t));
}
800 796
/*
 * Destructor for SMP target entries: frees the mptsas_smp_t.
 * NOTE(review): presumably registered as the destructor of the
 * m_smp_targets refhash (its refhash_create is outside this view) --
 * same pattern as mptsas_target_free.
 */
static void
mptsas_smp_free(void *op)
{
	kmem_free(op, sizeof (mptsas_smp_t));
}
806 802
/*
 * Empty and destroy the per-instance target hash tables.
 *
 * Every entry is removed from the target and SMP-target refhashes
 * (removal invokes the free callback registered at refhash_create
 * time), then the three hash structures themselves are destroyed.
 *
 * NOTE(review): each loop calls refhash_next() on an element after
 * refhash_remove() was applied to it; this relies on refhash deferring
 * the actual free of held/iterated objects -- confirm against the
 * refhash implementation.  Also note m_tmp_targets is destroyed but,
 * unlike the other two, its pointer is not reset to NULL afterwards.
 */
static void
mptsas_destroy_hashes(mptsas_t *mpt)
{
	mptsas_target_t *tp;
	mptsas_smp_t *sp;

	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		refhash_remove(mpt->m_targets, tp);
	}
	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
	    sp = refhash_next(mpt->m_smp_targets, sp)) {
		refhash_remove(mpt->m_smp_targets, sp);
	}
	refhash_destroy(mpt->m_tmp_targets);
	refhash_destroy(mpt->m_targets);
	refhash_destroy(mpt->m_smp_targets);
	mpt->m_targets = NULL;
	mpt->m_smp_targets = NULL;
}
827 823
828 824 static int
829 825 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
830 826 {
831 827 dev_info_t *pdip;
832 828 mptsas_t *mpt;
833 829 scsi_hba_tran_t *hba_tran;
834 830 char *iport = NULL;
835 831 char phymask[MPTSAS_MAX_PHYS];
836 832 mptsas_phymask_t phy_mask = 0;
837 833 int dynamic_port = 0;
838 834 uint32_t page_address;
839 835 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
840 836 int rval = DDI_FAILURE;
841 837 int i = 0;
842 838 uint8_t numphys = 0;
843 839 uint8_t phy_id;
844 840 uint8_t phy_port = 0;
845 841 uint16_t attached_devhdl = 0;
846 842 uint32_t dev_info;
847 843 uint64_t attached_sas_wwn;
848 844 uint16_t dev_hdl;
849 845 uint16_t pdev_hdl;
850 846 uint16_t bay_num, enclosure, io_flags;
851 847 char attached_wwnstr[MPTSAS_WWN_STRLEN];
852 848
853 849 /* CONSTCOND */
854 850 ASSERT(NO_COMPETING_THREADS);
855 851
856 852 switch (cmd) {
857 853 case DDI_ATTACH:
858 854 break;
859 855
860 856 case DDI_RESUME:
861 857 /*
862 858 * If this a scsi-iport node, nothing to do here.
863 859 */
864 860 return (DDI_SUCCESS);
865 861
866 862 default:
867 863 return (DDI_FAILURE);
868 864 }
869 865
870 866 pdip = ddi_get_parent(dip);
871 867
872 868 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
873 869 NULL) {
874 870 cmn_err(CE_WARN, "Failed attach iport because fail to "
875 871 "get tran vector for the HBA node");
876 872 return (DDI_FAILURE);
877 873 }
878 874
879 875 mpt = TRAN2MPT(hba_tran);
880 876 ASSERT(mpt != NULL);
881 877 if (mpt == NULL)
882 878 return (DDI_FAILURE);
883 879
884 880 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
885 881 NULL) {
886 882 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
887 883 "get tran vector for the iport node");
888 884 return (DDI_FAILURE);
889 885 }
890 886
891 887 /*
892 888 * Overwrite parent's tran_hba_private to iport's tran vector
893 889 */
894 890 hba_tran->tran_hba_private = mpt;
895 891
896 892 ddi_report_dev(dip);
897 893
898 894 /*
899 895 * Get SAS address for initiator port according dev_handle
900 896 */
901 897 iport = ddi_get_name_addr(dip);
902 898 if (iport && strncmp(iport, "v0", 2) == 0) {
903 899 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
904 900 MPTSAS_VIRTUAL_PORT, 1) !=
905 901 DDI_PROP_SUCCESS) {
906 902 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
907 903 MPTSAS_VIRTUAL_PORT);
908 904 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
909 905 "prop update failed");
910 906 return (DDI_FAILURE);
911 907 }
912 908 return (DDI_SUCCESS);
913 909 }
914 910
915 911 mutex_enter(&mpt->m_mutex);
916 912 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
917 913 bzero(phymask, sizeof (phymask));
918 914 (void) sprintf(phymask,
919 915 "%x", mpt->m_phy_info[i].phy_mask);
920 916 if (strcmp(phymask, iport) == 0) {
921 917 break;
922 918 }
923 919 }
924 920
925 921 if (i == MPTSAS_MAX_PHYS) {
926 922 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
927 923 "seems not exist", iport);
928 924 mutex_exit(&mpt->m_mutex);
929 925 return (DDI_FAILURE);
930 926 }
931 927
932 928 phy_mask = mpt->m_phy_info[i].phy_mask;
933 929
934 930 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
935 931 dynamic_port = 1;
936 932 else
937 933 dynamic_port = 0;
938 934
939 935 /*
940 936 * Update PHY info for smhba
941 937 */
942 938 if (mptsas_smhba_phy_init(mpt)) {
943 939 mutex_exit(&mpt->m_mutex);
944 940 mptsas_log(mpt, CE_WARN, "mptsas phy update "
945 941 "failed");
946 942 return (DDI_FAILURE);
947 943 }
948 944
949 945 mutex_exit(&mpt->m_mutex);
950 946
951 947 numphys = 0;
952 948 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
953 949 if ((phy_mask >> i) & 0x01) {
954 950 numphys++;
955 951 }
956 952 }
957 953
958 954 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
959 955 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
960 956 mpt->un.m_base_wwid);
961 957
962 958 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
963 959 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
964 960 DDI_PROP_SUCCESS) {
965 961 (void) ddi_prop_remove(DDI_DEV_T_NONE,
966 962 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
967 963 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
968 964 "prop update failed");
969 965 return (DDI_FAILURE);
970 966 }
971 967 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
972 968 MPTSAS_NUM_PHYS, numphys) !=
973 969 DDI_PROP_SUCCESS) {
974 970 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
975 971 return (DDI_FAILURE);
976 972 }
977 973
978 974 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
979 975 "phymask", phy_mask) !=
980 976 DDI_PROP_SUCCESS) {
981 977 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
982 978 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
983 979 "prop update failed");
984 980 return (DDI_FAILURE);
985 981 }
986 982
987 983 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
988 984 "dynamic-port", dynamic_port) !=
989 985 DDI_PROP_SUCCESS) {
990 986 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
991 987 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
992 988 "prop update failed");
993 989 return (DDI_FAILURE);
994 990 }
995 991 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
996 992 MPTSAS_VIRTUAL_PORT, 0) !=
997 993 DDI_PROP_SUCCESS) {
998 994 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
999 995 MPTSAS_VIRTUAL_PORT);
1000 996 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
1001 997 "prop update failed");
1002 998 return (DDI_FAILURE);
1003 999 }
1004 1000 mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
1005 1001 &attached_devhdl);
1006 1002
1007 1003 mutex_enter(&mpt->m_mutex);
1008 1004 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
1009 1005 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
1010 1006 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
1011 1007 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
1012 1008 &pdev_hdl, &bay_num, &enclosure, &io_flags);
1013 1009 if (rval != DDI_SUCCESS) {
1014 1010 mptsas_log(mpt, CE_WARN,
1015 1011 "Failed to get device page0 for handle:%d",
1016 1012 attached_devhdl);
1017 1013 mutex_exit(&mpt->m_mutex);
1018 1014 return (DDI_FAILURE);
1019 1015 }
1020 1016
1021 1017 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1022 1018 bzero(phymask, sizeof (phymask));
1023 1019 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
1024 1020 if (strcmp(phymask, iport) == 0) {
1025 1021 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
1026 1022 "%x",
1027 1023 mpt->m_phy_info[i].phy_mask);
1028 1024 }
1029 1025 }
1030 1026 mutex_exit(&mpt->m_mutex);
1031 1027
1032 1028 bzero(attached_wwnstr, sizeof (attached_wwnstr));
1033 1029 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
1034 1030 attached_sas_wwn);
1035 1031 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
1036 1032 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
1037 1033 DDI_PROP_SUCCESS) {
1038 1034 (void) ddi_prop_remove(DDI_DEV_T_NONE,
1039 1035 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
1040 1036 return (DDI_FAILURE);
1041 1037 }
1042 1038
1043 1039 /* Create kstats for each phy on this iport */
1044 1040
1045 1041 mptsas_create_phy_stats(mpt, iport, dip);
1046 1042
1047 1043 /*
1048 1044 * register sas hba iport with mdi (MPxIO/vhci)
1049 1045 */
1050 1046 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
1051 1047 dip, 0) == MDI_SUCCESS) {
1052 1048 mpt->m_mpxio_enable = TRUE;
1053 1049 }
1054 1050 return (DDI_SUCCESS);
1055 1051 }
1056 1052
1057 1053 /*
1058 1054 * Notes:
1059 1055 * Set up all device state and allocate data structures,
1060 1056 * mutexes, condition variables, etc. for device operation.
1061 1057 * Add interrupts needed.
1062 1058 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1063 1059 */
static int
mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	mptsas_t		*mpt = NULL;
	int			instance, i, j;
	int			doneq_thread_num;
	/*
	 * Per-resource progress flags: each is bumped only after the
	 * corresponding resource is fully set up, so the fail: path at
	 * the bottom can unwind exactly what was acquired, in reverse
	 * order.
	 */
	char			intr_added = 0;
	char			map_setup = 0;
	char			config_setup = 0;
	char			hba_attach_setup = 0;
	char			smp_attach_setup = 0;
	char			enc_attach_setup = 0;
	char			mutex_init_done = 0;
	char			event_taskq_create = 0;
	char			dr_taskq_create = 0;
	char			doneq_thread_create = 0;
	char			added_watchdog = 0;
	scsi_hba_tran_t		*hba_tran;
	uint_t			mem_bar = MEM_SPACE;
	int			rval = DDI_FAILURE;

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	/* iport (scsi-iport child) nodes take a separate attach path. */
	if (scsi_hba_iport_unit_address(dip)) {
		return (mptsas_iport_attach(dip, cmd));
	}

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
			return (DDI_FAILURE);

		mpt = TRAN2MPT(hba_tran);

		if (!mpt) {
			return (DDI_FAILURE);
		}

		/*
		 * Reset hardware and softc to "no outstanding commands"
		 * Note that a check condition can result on first command
		 * to a target.
		 */
		mutex_enter(&mpt->m_mutex);

		/*
		 * raise power.
		 */
		if (mpt->m_options & MPTSAS_OPT_PM) {
			mutex_exit(&mpt->m_mutex);
			(void) pm_busy_component(dip, 0);
			rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
			if (rval == DDI_SUCCESS) {
				mutex_enter(&mpt->m_mutex);
			} else {
				/*
				 * The pm_raise_power() call above failed,
				 * and that can only occur if we were unable
				 * to reset the hardware. This is probably
				 * due to unhealty hardware, and because
				 * important filesystems(such as the root
				 * filesystem) could be on the attached disks,
				 * it would not be a good idea to continue,
				 * as we won't be entirely certain we are
				 * writing correct data. So we panic() here
				 * to not only prevent possible data corruption,
				 * but to give developers or end users a hope
				 * of identifying and correcting any problems.
				 */
				fm_panic("mptsas could not reset hardware "
				    "during resume");
			}
		}

		mpt->m_suspended = 0;

		/*
		 * Reinitialize ioc
		 */
		mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
		if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
			mutex_exit(&mpt->m_mutex);
			if (mpt->m_options & MPTSAS_OPT_PM) {
				(void) pm_idle_component(dip, 0);
			}
			fm_panic("mptsas init chip fail during resume");
		}
		/*
		 * mptsas_update_driver_data needs interrupts so enable them
		 * first.
		 */
		MPTSAS_ENABLE_INTR(mpt);
		mptsas_update_driver_data(mpt);

		/* start requests, if possible */
		mptsas_restart_hba(mpt);

		mutex_exit(&mpt->m_mutex);

		/*
		 * Restart watch thread
		 */
		mutex_enter(&mptsas_global_mutex);
		if (mptsas_timeout_id == 0) {
			mptsas_timeout_id = timeout(mptsas_watch, NULL,
			    mptsas_tick);
			mptsas_timeouts_enabled = 1;
		}
		mutex_exit(&mptsas_global_mutex);

		/* report idle status to pm framework */
		if (mpt->m_options & MPTSAS_OPT_PM) {
			(void) pm_idle_component(dip, 0);
		}

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);

	}

	instance = ddi_get_instance(dip);

	/*
	 * Allocate softc information.
	 */
	if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
		mptsas_log(NULL, CE_WARN,
		    "mptsas%d: cannot allocate soft state", instance);
		goto fail;
	}

	mpt = ddi_get_soft_state(mptsas_state, instance);

	if (mpt == NULL) {
		mptsas_log(NULL, CE_WARN,
		    "mptsas%d: cannot get soft state", instance);
		goto fail;
	}

	/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
	scsi_size_clean(dip);

	mpt->m_dip = dip;
	mpt->m_instance = instance;

	/* Make a per-instance copy of the structures */
	mpt->m_io_dma_attr = mptsas_dma_attrs64;
	if (mptsas_use_64bit_msgaddr) {
		mpt->m_msg_dma_attr = mptsas_dma_attrs64;
	} else {
		mpt->m_msg_dma_attr = mptsas_dma_attrs;
	}
	mpt->m_reg_acc_attr = mptsas_dev_attr;
	mpt->m_dev_acc_attr = mptsas_dev_attr;

	/*
	 * Size of individual request sense buffer
	 */
	mpt->m_req_sense_size = EXTCMDS_STATUS_SIZE;

	/*
	 * Initialize FMA
	 */
	mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);

	mptsas_fm_init(mpt);

	if (mptsas_alloc_handshake_msg(mpt,
	    sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
		goto fail;
	}

	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
		goto fail;
	}
	config_setup++;

	if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
	    0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "map setup failed");
		goto fail;
	}
	map_setup++;

	/*
	 * A taskq is created for dealing with the event handler
	 */
	if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
		goto fail;
	}
	event_taskq_create++;

	/*
	 * A taskq is created for dealing with dr events
	 */
	if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
	    "mptsas_dr_taskq",
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
		    "failed");
		goto fail;
	}
	dr_taskq_create++;

	/* Tunables for the done-queue worker threads (see below). */
	mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    0, "mptsas_doneq_thread_threshold_prop", 10);
	mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    0, "mptsas_doneq_length_threshold_prop", 8);
	mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    0, "mptsas_doneq_thread_n_prop", 8);

	if (mpt->m_doneq_thread_n) {
		cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
		mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);

		mutex_enter(&mpt->m_doneq_mutex);
		mpt->m_doneq_thread_id =
		    kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
		    * mpt->m_doneq_thread_n, KM_SLEEP);

		for (j = 0; j < mpt->m_doneq_thread_n; j++) {
			cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
			    CV_DRIVER, NULL);
			mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
			    MUTEX_DRIVER, NULL);
			mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
			mpt->m_doneq_thread_id[j].flag |=
			    MPTSAS_DONEQ_THREAD_ACTIVE;
			mpt->m_doneq_thread_id[j].arg.mpt = mpt;
			mpt->m_doneq_thread_id[j].arg.t = j;
			mpt->m_doneq_thread_id[j].threadp =
			    thread_create(NULL, 0, mptsas_doneq_thread,
			    &mpt->m_doneq_thread_id[j].arg,
			    0, &p0, TS_RUN, minclsyspri);
			mpt->m_doneq_thread_id[j].donetail =
			    &mpt->m_doneq_thread_id[j].doneq;
			mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
		}
		mutex_exit(&mpt->m_doneq_mutex);
		doneq_thread_create++;
	}

	/*
	 * Disable hardware interrupt since we're not ready to
	 * handle it yet.
	 */
	MPTSAS_DISABLE_INTR(mpt);
	if (mptsas_register_intrs(mpt) == FALSE)
		goto fail;
	intr_added++;

	/* Initialize mutex used in interrupt handler */
	mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(mpt->m_intr_pri));
	mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(mpt->m_intr_pri));
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
		    NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(mpt->m_intr_pri));
	}

	cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_extreq_sense_refcount_cv, NULL, CV_DRIVER, NULL);
	mutex_init_done++;

	mutex_enter(&mpt->m_mutex);
	/*
	 * Initialize power management component
	 */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (mptsas_init_pm(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
			    "failed");
			goto fail;
		}
	}

	/*
	 * Initialize chip using Message Unit Reset, if allowed
	 */
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
		mutex_exit(&mpt->m_mutex);
		mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
		goto fail;
	}

	mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_target_free, sizeof (mptsas_target_t),
	    offsetof(mptsas_target_t, m_link),
	    offsetof(mptsas_target_t, m_addr), KM_SLEEP);

	/*
	 * The refhash for temporary targets uses the address of the target
	 * struct itself as tag, so the tag offset is 0. See the implementation
	 * of mptsas_tmp_target_hash() and mptsas_tmp_target_cmp().
	 */
	mpt->m_tmp_targets = refhash_create(MPTSAS_TMP_TARGET_BUCKET_COUNT,
	    mptsas_tmp_target_hash, mptsas_tmp_target_cmp,
	    mptsas_target_free, sizeof (mptsas_target_t),
	    offsetof(mptsas_target_t, m_link), 0, KM_SLEEP);

	/*
	 * Fill in the phy_info structure and get the base WWID
	 */
	if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_get_manufacture_page5 failed!");
		goto fail;
	}

	if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_get_sas_io_unit_page_hndshk failed!");
		goto fail;
	}

	if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_get_manufacture_page0 failed!");
		goto fail;
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * Register the iport for multiple port HBA
	 */
	mptsas_iport_register(mpt);

	/*
	 * initialize SCSI HBA transport structure
	 */
	if (mptsas_hba_setup(mpt) == FALSE)
		goto fail;
	hba_attach_setup++;

	if (mptsas_smp_setup(mpt) == FALSE)
		goto fail;
	smp_attach_setup++;

	if (mptsas_enc_setup(mpt) == FALSE)
		goto fail;
	enc_attach_setup++;

	if (mptsas_cache_create(mpt) == FALSE)
		goto fail;

	mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
	if (mpt->m_scsi_reset_delay == 0) {
		mptsas_log(mpt, CE_NOTE,
		    "scsi_reset_delay of 0 is not recommended,"
		    " resetting to SCSI_DEFAULT_RESET_DELAY\n");
		mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
	}

	/*
	 * Initialize the wait and done FIFO queue
	 */
	mpt->m_donetail = &mpt->m_doneq;
	mpt->m_waitqtail = &mpt->m_waitq;
	mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
	mpt->m_tx_draining = 0;

	/*
	 * ioc cmd queue initialize
	 */
	mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
	mpt->m_dev_handle = 0xFFFF;

	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * enable event notification
	 */
	mutex_enter(&mpt->m_mutex);
	if (mptsas_ioc_enable_event_notification(mpt)) {
		mutex_exit(&mpt->m_mutex);
		goto fail;
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * used for mptsas_watch
	 */
	mptsas_list_add(mpt);

	/* First instance to attach starts the global watchdog timeout. */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_timeouts_enabled == 0) {
		mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
		    dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);

		mptsas_tick = mptsas_scsi_watchdog_tick *
		    drv_usectohz((clock_t)1000000);

		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
		mptsas_timeouts_enabled = 1;
	}
	mutex_exit(&mptsas_global_mutex);
	added_watchdog++;

	/*
	 * Initialize PHY info for smhba.
	 * This requires watchdog to be enabled otherwise if interrupts
	 * don't work the system will hang.
	 */
	if (mptsas_smhba_setup(mpt)) {
		mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
		    "failed");
		goto fail;
	}

	/* Check all dma handles allocated in attach */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
	    != DDI_SUCCESS)) {
		goto fail;
	}

	/* Check all acc handles allocated in attach */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle)
	    != DDI_SUCCESS)) {
		goto fail;
	}

	/*
	 * After this point, we are not going to fail the attach.
	 */

	/* Print message of HBA present */
	ddi_report_dev(dip);

	/* report idle status to pm framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_idle_component(dip, 0);
	}

	return (DDI_SUCCESS);

fail:
	mptsas_log(mpt, CE_WARN, "attach failed");
	mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
	ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	if (mpt) {
		/* deallocate in reverse order */
		if (added_watchdog) {
			mptsas_list_del(mpt);
			mutex_enter(&mptsas_global_mutex);

			/* Stop global watchdog if we were its only user. */
			if (mptsas_timeout_id && (mptsas_head == NULL)) {
				timeout_id_t tid = mptsas_timeout_id;
				mptsas_timeouts_enabled = 0;
				mptsas_timeout_id = 0;
				mutex_exit(&mptsas_global_mutex);
				(void) untimeout(tid);
				mutex_enter(&mptsas_global_mutex);
			}
			mutex_exit(&mptsas_global_mutex);
		}

		mptsas_cache_destroy(mpt);

		if (smp_attach_setup) {
			mptsas_smp_teardown(mpt);
		}
		if (enc_attach_setup) {
			mptsas_enc_teardown(mpt);
		}
		if (hba_attach_setup) {
			mptsas_hba_teardown(mpt);
		}

		if (mpt->m_tmp_targets)
			refhash_destroy(mpt->m_tmp_targets);
		if (mpt->m_targets)
			refhash_destroy(mpt->m_targets);
		if (mpt->m_smp_targets)
			refhash_destroy(mpt->m_smp_targets);

		if (mpt->m_active) {
			mptsas_free_active_slots(mpt);
		}
		if (intr_added) {
			mptsas_unregister_intrs(mpt);
		}

		if (doneq_thread_create) {
			/*
			 * Ask each worker to exit, wait until all have
			 * gone (they decrement m_doneq_thread_n), then
			 * free the per-thread state.
			 */
			mutex_enter(&mpt->m_doneq_mutex);
			doneq_thread_num = mpt->m_doneq_thread_n;
			for (j = 0; j < mpt->m_doneq_thread_n; j++) {
				mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
				mpt->m_doneq_thread_id[j].flag &=
				    (~MPTSAS_DONEQ_THREAD_ACTIVE);
				cv_signal(&mpt->m_doneq_thread_id[j].cv);
				mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
			}
			while (mpt->m_doneq_thread_n) {
				cv_wait(&mpt->m_doneq_thread_cv,
				    &mpt->m_doneq_mutex);
			}
			for (j = 0; j < doneq_thread_num; j++) {
				cv_destroy(&mpt->m_doneq_thread_id[j].cv);
				mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
			}
			kmem_free(mpt->m_doneq_thread_id,
			    sizeof (mptsas_doneq_thread_list_t)
			    * doneq_thread_num);
			mutex_exit(&mpt->m_doneq_mutex);
			cv_destroy(&mpt->m_doneq_thread_cv);
			mutex_destroy(&mpt->m_doneq_mutex);
		}
		if (event_taskq_create) {
			ddi_taskq_destroy(mpt->m_event_taskq);
		}
		if (dr_taskq_create) {
			ddi_taskq_destroy(mpt->m_dr_taskq);
		}
		if (mutex_init_done) {
			mutex_destroy(&mpt->m_tx_waitq_mutex);
			mutex_destroy(&mpt->m_passthru_mutex);
			mutex_destroy(&mpt->m_mutex);
			for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
				mutex_destroy(
				    &mpt->m_phy_info[i].smhba_info.phy_mutex);
			}
			cv_destroy(&mpt->m_cv);
			cv_destroy(&mpt->m_passthru_cv);
			cv_destroy(&mpt->m_fw_cv);
			cv_destroy(&mpt->m_config_cv);
			cv_destroy(&mpt->m_fw_diag_cv);
			cv_destroy(&mpt->m_extreq_sense_refcount_cv);
		}

		if (map_setup) {
			mptsas_cfg_fini(mpt);
		}
		if (config_setup) {
			mptsas_config_space_fini(mpt);
		}
		mptsas_free_handshake_msg(mpt);
		mptsas_hba_fini(mpt);

		mptsas_fm_fini(mpt);
		ddi_soft_state_free(mptsas_state, instance);
		ddi_prop_remove_all(dip);
	}
	return (DDI_FAILURE);
}
1659 1655
/*
 * DDI suspend path (DDI_SUSPEND handling for detach(9E)).
 *
 * Marks the instance suspended, cancels its pending timeouts, and --
 * if every attached mpt instance is now suspended -- also cancels the
 * driver-global watch/reset-watch timeouts.  If the device is at full
 * power, HBA interrupts are disabled, a RAID "system shutdown" action
 * is sent to sync Integrated RAID metadata, and the event/dr taskqs
 * are drained.  Always returns DDI_SUCCESS.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* iport nodes have nothing of their own to suspend. */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* Already suspended: just bump the count and return. */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	/*
	 * m_mutex is dropped around untimeout() because the timeout
	 * handler itself may need the lock; clearing the id first
	 * prevents it being re-cancelled.
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {	/* no instance still active */
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1766 1762
1767 1763 #ifdef __sparc
1768 1764 /*ARGSUSED*/
1769 1765 static int
1770 1766 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1771 1767 {
1772 1768 mptsas_t *mpt;
1773 1769 scsi_hba_tran_t *tran;
1774 1770
1775 1771 /*
1776 1772 * If this call is for iport, just return.
1777 1773 */
1778 1774 if (scsi_hba_iport_unit_address(devi))
1779 1775 return (DDI_SUCCESS);
1780 1776
1781 1777 if ((tran = ddi_get_driver_private(devi)) == NULL)
1782 1778 return (DDI_SUCCESS);
1783 1779
1784 1780 if ((mpt = TRAN2MPT(tran)) == NULL)
1785 1781 return (DDI_SUCCESS);
1786 1782
1787 1783 /*
1788 1784 * Send RAID action system shutdown to sync IR. Disable HBA
1789 1785 * interrupts in hardware first.
1790 1786 */
1791 1787 MPTSAS_DISABLE_INTR(mpt);
1792 1788 mptsas_raid_action_system_shutdown(mpt);
1793 1789
1794 1790 return (DDI_SUCCESS);
1795 1791 }
1796 1792 #else /* __sparc */
1797 1793 /*
1798 1794 * quiesce(9E) entry point.
1799 1795 *
1800 1796 * This function is called when the system is single-threaded at high
1801 1797 * PIL with preemption disabled. Therefore, this function must not be
1802 1798 * blocked.
1803 1799 *
1804 1800 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1805 1801 * DDI_FAILURE indicates an error condition and should almost never happen.
1806 1802 */
static int
mptsas_quiesce(dev_info_t *devi)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;

	/*
	 * If this call is for iport, just return.
	 */
	if (scsi_hba_iport_unit_address(devi))
		return (DDI_SUCCESS);

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	if ((mpt = TRAN2MPT(tran)) == NULL)
		return (DDI_SUCCESS);

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/* Send RAID action system shutdown to sync IR */
	mptsas_raid_action_system_shutdown(mpt);

	return (DDI_SUCCESS);
}
1832 1828 #endif /* __sparc */
1833 1829
1834 1830 /*
1835 1831 * detach(9E). Remove all device allocations and system resources;
1836 1832 * disable device interrupts.
1837 1833 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1838 1834 */
1839 1835 static int
1840 1836 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1841 1837 {
1842 1838 /* CONSTCOND */
1843 1839 ASSERT(NO_COMPETING_THREADS);
1844 1840 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1845 1841
1846 1842 switch (cmd) {
1847 1843 case DDI_DETACH:
1848 1844 return (mptsas_do_detach(devi));
1849 1845
1850 1846 case DDI_SUSPEND:
1851 1847 return (mptsas_suspend(devi));
1852 1848
1853 1849 default:
1854 1850 return (DDI_FAILURE);
1855 1851 }
1856 1852 /* NOTREACHED */
1857 1853 }
1858 1854
/*
 * Full DDI_DETACH of a node.
 *
 * For an iport node: free all MPxIO path-info children (failing the detach
 * if any cannot be freed), unregister the pHCI, and remove properties.
 *
 * For the HBA node: raise power to D0 so registers are accessible, quiesce
 * the chip (IR sync + message unit reset), then dismantle everything
 * mptsas_attach() built — interrupts, taskqs, doneq threads, hashes, caches,
 * locks, cv's, SCSA/SMP bindings, config space, FM — and free the softstate.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* free every client path hanging off this iport */
			while ((pip = mdi_get_next_client_path(dip, NULL)) !=
			    NULL) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR.  After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/*
	 * Ask each doneq worker to exit, wait until they have all gone,
	 * then reclaim their cv/mutex state and the id array.
	 */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 * (drop m_mutex around untimeout(); see mptsas_suspend()).
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	mptsas_destroy_hashes(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);
	cv_destroy(&mpt->m_extreq_sense_refcount_cv);

	mptsas_smp_teardown(mpt);
	mptsas_enc_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
2076 2072
2077 2073 static void
2078 2074 mptsas_list_add(mptsas_t *mpt)
2079 2075 {
2080 2076 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2081 2077
2082 2078 if (mptsas_head == NULL) {
2083 2079 mptsas_head = mpt;
2084 2080 } else {
2085 2081 mptsas_tail->m_next = mpt;
2086 2082 }
2087 2083 mptsas_tail = mpt;
2088 2084 rw_exit(&mptsas_global_rwlock);
2089 2085 }
2090 2086
2091 2087 static void
2092 2088 mptsas_list_del(mptsas_t *mpt)
2093 2089 {
2094 2090 mptsas_t *m;
2095 2091 /*
2096 2092 * Remove device instance from the global linked list
2097 2093 */
2098 2094 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2099 2095 if (mptsas_head == mpt) {
2100 2096 m = mptsas_head = mpt->m_next;
2101 2097 } else {
2102 2098 for (m = mptsas_head; m != NULL; m = m->m_next) {
2103 2099 if (m->m_next == mpt) {
2104 2100 m->m_next = mpt->m_next;
2105 2101 break;
2106 2102 }
2107 2103 }
2108 2104 if (m == NULL) {
2109 2105 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
2110 2106 }
2111 2107 }
2112 2108
2113 2109 if (mptsas_tail == mpt) {
2114 2110 mptsas_tail = m;
2115 2111 }
2116 2112 rw_exit(&mptsas_global_rwlock);
2117 2113 }
2118 2114
2119 2115 static int
2120 2116 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2121 2117 {
2122 2118 ddi_dma_attr_t task_dma_attrs;
2123 2119
2124 2120 mpt->m_hshk_dma_size = 0;
2125 2121 task_dma_attrs = mpt->m_msg_dma_attr;
2126 2122 task_dma_attrs.dma_attr_sgllen = 1;
2127 2123 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2128 2124
2129 2125 /* allocate Task Management ddi_dma resources */
2130 2126 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
2131 2127 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
2132 2128 alloc_size, NULL) == FALSE) {
2133 2129 return (DDI_FAILURE);
2134 2130 }
2135 2131 mpt->m_hshk_dma_size = alloc_size;
2136 2132
2137 2133 return (DDI_SUCCESS);
2138 2134 }
2139 2135
2140 2136 static void
2141 2137 mptsas_free_handshake_msg(mptsas_t *mpt)
2142 2138 {
2143 2139 if (mpt->m_hshk_dma_size == 0)
2144 2140 return;
2145 2141 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
2146 2142 mpt->m_hshk_dma_size = 0;
2147 2143 }
2148 2144
/*
 * Allocate the SCSA transport structure, fill in the entry-point vector,
 * and attach this driver as an HBA.  Returns TRUE on success; on failure
 * the tran is freed and FALSE is returned.
 */
static int
mptsas_hba_setup(mptsas_t *mpt)
{
	scsi_hba_tran_t	*hba_tran;
	int		tran_flags;

	/* Allocate a transport structure */
	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
	    SCSI_HBA_CANSLEEP);
	ASSERT(mpt->m_tran != NULL);

	hba_tran->tran_hba_private	= mpt;
	hba_tran->tran_tgt_private	= NULL;

	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;

	hba_tran->tran_start		= mptsas_scsi_start;
	hba_tran->tran_reset		= mptsas_scsi_reset;
	hba_tran->tran_abort		= mptsas_scsi_abort;
	hba_tran->tran_getcap		= mptsas_scsi_getcap;
	hba_tran->tran_setcap		= mptsas_scsi_setcap;
	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;

	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;

	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
	hba_tran->tran_get_name		= mptsas_get_name;

	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
	hba_tran->tran_bus_reset	= NULL;

	/* no event support is provided by this HBA */
	hba_tran->tran_add_eventcall	= NULL;
	hba_tran->tran_get_eventcookie	= NULL;
	hba_tran->tran_post_event	= NULL;
	hba_tran->tran_remove_eventcall	= NULL;

	hba_tran->tran_bus_config	= mptsas_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	/*
	 * All children of the HBA are iports. We need tran was cloned.
	 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
	 * inherited to iport's tran vector.
	 */
	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);

	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
	    hba_tran, tran_flags) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
		scsi_hba_tran_free(hba_tran);
		mpt->m_tran = NULL;
		return (FALSE);
	}
	return (TRUE);
}
2210 2206
2211 2207 static void
2212 2208 mptsas_hba_teardown(mptsas_t *mpt)
2213 2209 {
2214 2210 (void) scsi_hba_detach(mpt->m_dip);
2215 2211 if (mpt->m_tran != NULL) {
2216 2212 scsi_hba_tran_free(mpt->m_tran);
2217 2213 mpt->m_tran = NULL;
2218 2214 }
2219 2215 }
2220 2216
/*
 * Register one iport node per SAS port.  Each port's iport is named by the
 * hex phy mask of all attached phys sharing that port number; a virtual
 * iport "v0" is always registered for RAID volumes.
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int		i, j;
	mptsas_phymask_t	mask = 0x0;
	/*
	 * initial value of mask is 0
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* skip phys with nothing attached */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* phy already covered by a previously built port mask */
		if ((mask & (1 << i)) != 0)
			continue;

		/* gather every attached phy on the same port into phy_mask */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		/* iport unit-address is the phy mask in hex */
		(void) sprintf(phy_mask_name, "%x", phy_mask);

		/* m_mutex is dropped around the framework call */
		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2275 2271
/*
 * Allocate and attach the SMP (Serial Management Protocol) transport and
 * create the SMP target refhash.  Returns TRUE on success; on failure the
 * smp tran is freed and FALSE is returned.
 */
static int
mptsas_smp_setup(mptsas_t *mpt)
{
	mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
	ASSERT(mpt->m_smptran != NULL);
	mpt->m_smptran->smp_tran_hba_private = mpt;
	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
	if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
		smp_hba_tran_free(mpt->m_smptran);
		mpt->m_smptran = NULL;
		return (FALSE);
	}
	/*
	 * Initialize smp hash table
	 */
	mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_smp_free, sizeof (mptsas_smp_t),
	    offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
	    KM_SLEEP);
	/* 0xFFFF marks "no SMP device handle yet" */
	mpt->m_smp_devhdl = 0xFFFF;

	return (TRUE);
}
2301 2297
2302 2298 static void
2303 2299 mptsas_smp_teardown(mptsas_t *mpt)
2304 2300 {
2305 2301 (void) smp_hba_detach(mpt->m_dip);
2306 2302 if (mpt->m_smptran != NULL) {
2307 2303 smp_hba_tran_free(mpt->m_smptran);
2308 2304 mpt->m_smptran = NULL;
2309 2305 }
2310 2306 mpt->m_smp_devhdl = 0;
2311 2307 }
2312 2308
2313 2309 static int
2314 2310 mptsas_enc_setup(mptsas_t *mpt)
2315 2311 {
2316 2312 list_create(&mpt->m_enclosures, sizeof (mptsas_enclosure_t),
2317 2313 offsetof(mptsas_enclosure_t, me_link));
2318 2314 return (TRUE);
2319 2315 }
2320 2316
2321 2317 static void
2322 2318 mptsas_enc_free(mptsas_enclosure_t *mep)
2323 2319 {
2324 2320 if (mep == NULL)
2325 2321 return;
2326 2322 if (mep->me_slotleds != NULL) {
2327 2323 VERIFY3U(mep->me_nslots, >, 0);
2328 2324 kmem_free(mep->me_slotleds, sizeof (uint8_t) * mep->me_nslots);
2329 2325 }
2330 2326 kmem_free(mep, sizeof (mptsas_enclosure_t));
2331 2327 }
2332 2328
2333 2329 static void
2334 2330 mptsas_enc_teardown(mptsas_t *mpt)
2335 2331 {
2336 2332 mptsas_enclosure_t *mep;
2337 2333
2338 2334 while ((mep = list_remove_head(&mpt->m_enclosures)) != NULL) {
2339 2335 mptsas_enc_free(mep);
2340 2336 }
2341 2337 list_destroy(&mpt->m_enclosures);
2342 2338 }
2343 2339
2344 2340 static mptsas_enclosure_t *
2345 2341 mptsas_enc_lookup(mptsas_t *mpt, uint16_t hdl)
2346 2342 {
2347 2343 mptsas_enclosure_t *mep;
2348 2344
2349 2345 ASSERT(MUTEX_HELD(&mpt->m_mutex));
2350 2346
2351 2347 for (mep = list_head(&mpt->m_enclosures); mep != NULL;
2352 2348 mep = list_next(&mpt->m_enclosures, mep)) {
2353 2349 if (hdl == mep->me_enchdl) {
2354 2350 return (mep);
2355 2351 }
2356 2352 }
2357 2353
2358 2354 return (NULL);
2359 2355 }
2360 2356
2361 2357 static int
2362 2358 mptsas_cache_create(mptsas_t *mpt)
2363 2359 {
2364 2360 int instance = mpt->m_instance;
2365 2361 char buf[64];
2366 2362
2367 2363 /*
2368 2364 * create kmem cache for packets
2369 2365 */
2370 2366 (void) sprintf(buf, "mptsas%d_cache", instance);
2371 2367 mpt->m_kmem_cache = kmem_cache_create(buf,
2372 2368 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2373 2369 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2374 2370 NULL, (void *)mpt, NULL, 0);
2375 2371
2376 2372 if (mpt->m_kmem_cache == NULL) {
2377 2373 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2378 2374 return (FALSE);
2379 2375 }
2380 2376
2381 2377 /*
2382 2378 * create kmem cache for extra SGL frames if SGL cannot
2383 2379 * be accomodated into main request frame.
2384 2380 */
2385 2381 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2386 2382 mpt->m_cache_frames = kmem_cache_create(buf,
2387 2383 sizeof (mptsas_cache_frames_t), 8,
2388 2384 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2389 2385 NULL, (void *)mpt, NULL, 0);
2390 2386
2391 2387 if (mpt->m_cache_frames == NULL) {
2392 2388 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2393 2389 return (FALSE);
2394 2390 }
2395 2391
2396 2392 return (TRUE);
2397 2393 }
2398 2394
2399 2395 static void
2400 2396 mptsas_cache_destroy(mptsas_t *mpt)
2401 2397 {
2402 2398 /* deallocate in reverse order */
2403 2399 if (mpt->m_cache_frames) {
2404 2400 kmem_cache_destroy(mpt->m_cache_frames);
2405 2401 mpt->m_cache_frames = NULL;
2406 2402 }
2407 2403 if (mpt->m_kmem_cache) {
2408 2404 kmem_cache_destroy(mpt->m_kmem_cache);
2409 2405 mpt->m_kmem_cache = NULL;
2410 2406 }
2411 2407 }
2412 2408
/*
 * power(9E) entry point for the HBA node.
 *
 * PM_LEVEL_D0: power the chip on, poll for it to leave the RESET state
 * (up to ~30s: 3000 polls x 10ms), and hard-reset it if it still is not
 * operational.  PM_LEVEL_D3: power the chip off.  A busy instance will
 * refuse to have its power lowered.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t	*mpt;
	int		rval = DDI_SUCCESS;
	int		polls = 0;
	uint32_t	ioc_status;

	/* power is managed on the HBA node, not per-iport */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset.
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2484 2480
/*
 * Initialize configuration space and figure out which
 * chip and revision of the chip the mpt driver is using.
 * Returns TRUE on success, FALSE if config space cannot be mapped.
 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	/* already mapped (e.g. on a previous attach/resume); nothing to do */
	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team.  This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}
2543 2539
2544 2540 static void
2545 2541 mptsas_config_space_fini(mptsas_t *mpt)
2546 2542 {
2547 2543 if (mpt->m_config_handle != NULL) {
2548 2544 mptsas_disable_bus_master(mpt);
2549 2545 pci_config_teardown(&mpt->m_config_handle);
2550 2546 mpt->m_config_handle = NULL;
2551 2547 }
2552 2548 }
2553 2549
2554 2550 static void
2555 2551 mptsas_setup_cmd_reg(mptsas_t *mpt)
2556 2552 {
2557 2553 ushort_t cmdreg;
2558 2554
2559 2555 /*
2560 2556 * Set the command register to the needed values.
2561 2557 */
2562 2558 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2563 2559 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2564 2560 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2565 2561 cmdreg &= ~PCI_COMM_IO;
2566 2562 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2567 2563 }
2568 2564
2569 2565 static void
2570 2566 mptsas_disable_bus_master(mptsas_t *mpt)
2571 2567 {
2572 2568 ushort_t cmdreg;
2573 2569
2574 2570 /*
2575 2571 * Clear the master enable bit in the PCI command register.
2576 2572 * This prevents any bus mastering activity like DMA.
2577 2573 */
2578 2574 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2579 2575 cmdreg &= ~PCI_COMM_ME;
2580 2576 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2581 2577 }
2582 2578
2583 2579 int
2584 2580 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2585 2581 {
2586 2582 ddi_dma_attr_t attrs;
2587 2583
2588 2584 attrs = mpt->m_io_dma_attr;
2589 2585 attrs.dma_attr_sgllen = 1;
2590 2586
2591 2587 ASSERT(dma_statep != NULL);
2592 2588
2593 2589 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2594 2590 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2595 2591 &dma_statep->cookie) == FALSE) {
2596 2592 return (DDI_FAILURE);
2597 2593 }
2598 2594
2599 2595 return (DDI_SUCCESS);
2600 2596 }
2601 2597
2602 2598 void
2603 2599 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2604 2600 {
2605 2601 ASSERT(dma_statep != NULL);
2606 2602 mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2607 2603 dma_statep->size = 0;
2608 2604 }
2609 2605
/*
 * Allocate a temporary single-cookie DMA buffer of 'size' bytes, run
 * 'callback' with the mapped memory (passing through 'var'), then free
 * the buffer.  The callback's return value is returned, demoted to
 * DDI_FAILURE if the FM handle checks fail afterwards.  Caller must
 * hold m_mutex.
 */
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t		attrs;
	ddi_dma_handle_t	dma_handle;
	caddr_t			memp;
	ddi_acc_handle_t	accessp;
	int			rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	attrs.dma_attr_granular = size;

	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	rval = (*callback) (mpt, memp, var, accessp);

	/* FM: any DMA/access fault during the callback fails the request */
	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}
2642 2638
/*
 * Allocate (or re-allocate) the DMA'able pool of request frames and record
 * both the device-visible address and the kernel mapping in the softc.
 * Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
mptsas_alloc_request_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * Free any previously allocated pool before re-allocating.
	 */
	if (mpt->m_dma_req_frame_hdl)
		mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
		    &mpt->m_acc_req_frame_hdl);

	/*
	 * The size of the request frame pool is:
	 *   Number of Request Frames * Request Frame Size
	 */
	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;

	/*
	 * set the DMA attributes.  System Request Message Frames must be
	 * aligned on a 16-byte boundary.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request frame pool.
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to fill in the frame.
	 */
	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_req_frame = memp;

	/*
	 * Clear the request frame pool.
	 */
	bzero(mpt->m_req_frame, mem_size);

	return (DDI_SUCCESS);
}
2696 2692
/*
 * Allocate the DMA pool used for SCSI request-sense (ARQ) buffers.
 *
 * The pool provides one fixed-size sense buffer per normal command slot
 * (m_max_requests - 2; SMID 0 and the TM slot are reserved) plus an extra
 * region, mptsas_extreq_sense_bufsize bytes long, that is handed out in
 * m_req_sense_size chunks through the m_erqsense_map resource map for
 * commands needing extended sense.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.  Safe to call again for re-allocation;
 * an existing pool (and its resource map) is destroyed first.
 */
static int
mptsas_alloc_sense_bufs(mptsas_t *mpt)
{
	ddi_dma_attr_t	sense_dma_attrs;
	caddr_t		memp;
	ddi_dma_cookie_t cookie;
	size_t		mem_size;
	int		num_extrqsense_bufs;

	/* No extended-sense buffer may still be handed out at this point. */
	ASSERT(mpt->m_extreq_sense_refcount == 0);

	/*
	 * re-alloc when it has already alloced; the old resource map must
	 * be freed along with the DMA memory it describes.
	 */
	if (mpt->m_dma_req_sense_hdl) {
		rmfreemap(mpt->m_erqsense_map);
		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
		    &mpt->m_acc_req_sense_hdl);
	}

	/*
	 * The size of the request sense pool is:
	 *   (Number of Request Frames - 2 ) * Request Sense Size +
	 *   extra memory for extended sense requests.
	 */
	mem_size = ((mpt->m_max_requests - 2) * mpt->m_req_sense_size) +
	    mptsas_extreq_sense_bufsize;

	/*
	 * set the DMA attributes.  ARQ buffers must be
	 * aligned on a 16-byte boundary.
	 */
	sense_dma_attrs = mpt->m_msg_dma_attr;
	sense_dma_attrs.dma_attr_align = 16;
	sense_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request sense buffer pool.
	 */
	if (mptsas_dma_addr_create(mpt, sense_dma_attrs,
	    &mpt->m_dma_req_sense_hdl, &mpt->m_acc_req_sense_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request sense base memory address.  This chip uses this
	 * address to dma the request sense data.  The second
	 * address is the address mpt uses to access the data.
	 * The third is the base for the extended rqsense buffers.
	 */
	mpt->m_req_sense_dma_addr = cookie.dmac_laddress;
	mpt->m_req_sense = memp;
	memp += (mpt->m_max_requests - 2) * mpt->m_req_sense_size;
	mpt->m_extreq_sense = memp;

	/*
	 * The extra memory is divided up into multiples of the base
	 * buffer size in order to allocate via rmalloc().
	 * Note that the rmallocmap cannot start at zero!
	 */
	num_extrqsense_bufs = mptsas_extreq_sense_bufsize /
	    mpt->m_req_sense_size;
	mpt->m_erqsense_map = rmallocmap_wait(num_extrqsense_bufs);
	rmfree(mpt->m_erqsense_map, num_extrqsense_bufs, 1);

	/*
	 * Clear the pool.
	 */
	bzero(mpt->m_req_sense, mem_size);

	return (DDI_SUCCESS);
}
2770 2766
2771 2767 static int
2772 2768 mptsas_alloc_reply_frames(mptsas_t *mpt)
2773 2769 {
2774 2770 ddi_dma_attr_t frame_dma_attrs;
2775 2771 caddr_t memp;
2776 2772 ddi_dma_cookie_t cookie;
2777 2773 size_t mem_size;
2778 2774
2779 2775 /*
2780 2776 * re-alloc when it has already alloced
2781 2777 */
2782 2778 if (mpt->m_dma_reply_frame_hdl) {
2783 2779 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2784 2780 &mpt->m_acc_reply_frame_hdl);
2785 2781 }
2786 2782
2787 2783 /*
2788 2784 * The size of the reply frame pool is:
2789 2785 * Number of Reply Frames * Reply Frame Size
2790 2786 */
2791 2787 mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2792 2788
2793 2789 /*
2794 2790 * set the DMA attributes. System Reply Message Frames must be
2795 2791 * aligned on a 4-byte boundry. This is the default.
2796 2792 */
2797 2793 frame_dma_attrs = mpt->m_msg_dma_attr;
2798 2794 frame_dma_attrs.dma_attr_sgllen = 1;
2799 2795
2800 2796 /*
2801 2797 * allocate the reply frame pool
2802 2798 */
2803 2799 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2804 2800 &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2805 2801 mem_size, &cookie) == FALSE) {
2806 2802 return (DDI_FAILURE);
2807 2803 }
2808 2804
2809 2805 /*
2810 2806 * Store the reply frame memory address. This chip uses this
2811 2807 * address to dma to and from the driver's frame. The second
2812 2808 * address is the address mpt uses to process the frame.
2813 2809 */
2814 2810 mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2815 2811 mpt->m_reply_frame = memp;
2816 2812
2817 2813 /*
2818 2814 * Clear the reply frame pool.
2819 2815 */
2820 2816 bzero(mpt->m_reply_frame, mem_size);
2821 2817
2822 2818 return (DDI_SUCCESS);
2823 2819 }
2824 2820
2825 2821 static int
2826 2822 mptsas_alloc_free_queue(mptsas_t *mpt)
2827 2823 {
2828 2824 ddi_dma_attr_t frame_dma_attrs;
2829 2825 caddr_t memp;
2830 2826 ddi_dma_cookie_t cookie;
2831 2827 size_t mem_size;
2832 2828
2833 2829 /*
2834 2830 * re-alloc when it has already alloced
2835 2831 */
2836 2832 if (mpt->m_dma_free_queue_hdl) {
2837 2833 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2838 2834 &mpt->m_acc_free_queue_hdl);
2839 2835 }
2840 2836
2841 2837 /*
2842 2838 * The reply free queue size is:
2843 2839 * Reply Free Queue Depth * 4
2844 2840 * The "4" is the size of one 32 bit address (low part of 64-bit
2845 2841 * address)
2846 2842 */
2847 2843 mem_size = mpt->m_free_queue_depth * 4;
2848 2844
2849 2845 /*
2850 2846 * set the DMA attributes The Reply Free Queue must be aligned on a
2851 2847 * 16-byte boundry.
2852 2848 */
2853 2849 frame_dma_attrs = mpt->m_msg_dma_attr;
2854 2850 frame_dma_attrs.dma_attr_align = 16;
2855 2851 frame_dma_attrs.dma_attr_sgllen = 1;
2856 2852
2857 2853 /*
2858 2854 * allocate the reply free queue
2859 2855 */
2860 2856 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2861 2857 &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2862 2858 mem_size, &cookie) == FALSE) {
2863 2859 return (DDI_FAILURE);
2864 2860 }
2865 2861
2866 2862 /*
2867 2863 * Store the reply free queue memory address. This chip uses this
2868 2864 * address to read from the reply free queue. The second address
2869 2865 * is the address mpt uses to manage the queue.
2870 2866 */
2871 2867 mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2872 2868 mpt->m_free_queue = memp;
2873 2869
2874 2870 /*
2875 2871 * Clear the reply free queue memory.
2876 2872 */
2877 2873 bzero(mpt->m_free_queue, mem_size);
2878 2874
2879 2875 return (DDI_SUCCESS);
2880 2876 }
2881 2877
2882 2878 static int
2883 2879 mptsas_alloc_post_queue(mptsas_t *mpt)
2884 2880 {
2885 2881 ddi_dma_attr_t frame_dma_attrs;
2886 2882 caddr_t memp;
2887 2883 ddi_dma_cookie_t cookie;
2888 2884 size_t mem_size;
2889 2885
2890 2886 /*
2891 2887 * re-alloc when it has already alloced
2892 2888 */
2893 2889 if (mpt->m_dma_post_queue_hdl) {
2894 2890 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2895 2891 &mpt->m_acc_post_queue_hdl);
2896 2892 }
2897 2893
2898 2894 /*
2899 2895 * The reply descriptor post queue size is:
2900 2896 * Reply Descriptor Post Queue Depth * 8
2901 2897 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2902 2898 */
2903 2899 mem_size = mpt->m_post_queue_depth * 8;
2904 2900
2905 2901 /*
2906 2902 * set the DMA attributes. The Reply Descriptor Post Queue must be
2907 2903 * aligned on a 16-byte boundry.
2908 2904 */
2909 2905 frame_dma_attrs = mpt->m_msg_dma_attr;
2910 2906 frame_dma_attrs.dma_attr_align = 16;
2911 2907 frame_dma_attrs.dma_attr_sgllen = 1;
2912 2908
2913 2909 /*
2914 2910 * allocate the reply post queue
2915 2911 */
2916 2912 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2917 2913 &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2918 2914 mem_size, &cookie) == FALSE) {
2919 2915 return (DDI_FAILURE);
2920 2916 }
2921 2917
2922 2918 /*
2923 2919 * Store the reply descriptor post queue memory address. This chip
2924 2920 * uses this address to write to the reply descriptor post queue. The
2925 2921 * second address is the address mpt uses to manage the queue.
2926 2922 */
2927 2923 mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2928 2924 mpt->m_post_queue = memp;
2929 2925
2930 2926 /*
2931 2927 * Clear the reply post queue memory.
2932 2928 */
2933 2929 bzero(mpt->m_post_queue, mem_size);
2934 2930
2935 2931 return (DDI_SUCCESS);
2936 2932 }
2937 2933
2938 2934 static void
2939 2935 mptsas_alloc_reply_args(mptsas_t *mpt)
2940 2936 {
2941 2937 if (mpt->m_replyh_args == NULL) {
2942 2938 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2943 2939 mpt->m_max_replies, KM_SLEEP);
2944 2940 }
2945 2941 }
2946 2942
2947 2943 static int
2948 2944 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2949 2945 {
2950 2946 mptsas_cache_frames_t *frames = NULL;
2951 2947 if (cmd->cmd_extra_frames == NULL) {
2952 2948 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2953 2949 if (frames == NULL) {
2954 2950 return (DDI_FAILURE);
2955 2951 }
2956 2952 cmd->cmd_extra_frames = frames;
2957 2953 }
2958 2954 return (DDI_SUCCESS);
2959 2955 }
2960 2956
2961 2957 static void
2962 2958 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2963 2959 {
2964 2960 if (cmd->cmd_extra_frames) {
2965 2961 kmem_cache_free(mpt->m_cache_frames,
2966 2962 (void *)cmd->cmd_extra_frames);
2967 2963 cmd->cmd_extra_frames = NULL;
2968 2964 }
2969 2965 }
2970 2966
/*
 * Release the mapped device register space (m_datap) established during
 * attach.  Counterpart of the register mapping set up by config-space
 * initialization.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2977 2973
/*
 * Tear down all HBA DMA pools and queues allocated by the
 * mptsas_alloc_* routines above (request frames, request sense buffers
 * plus their rmalloc map, reply frames, reply free queue, reply post
 * queue, and the reply handler argument array).  Each teardown is
 * guarded so partially-initialized instances are handled safely.
 */
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Free up any allocated memory
	 */
	if (mpt->m_dma_req_frame_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
		    &mpt->m_acc_req_frame_hdl);
	}

	/* The sense pool also owns the extended-sense resource map. */
	if (mpt->m_dma_req_sense_hdl) {
		rmfreemap(mpt->m_erqsense_map);
		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
		    &mpt->m_acc_req_sense_hdl);
	}

	if (mpt->m_dma_reply_frame_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
		    &mpt->m_acc_reply_frame_hdl);
	}

	if (mpt->m_dma_free_queue_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
		    &mpt->m_acc_free_queue_hdl);
	}

	if (mpt->m_dma_post_queue_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
		    &mpt->m_acc_post_queue_hdl);
	}

	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}
3017 3013
3018 3014 static int
3019 3015 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
3020 3016 {
3021 3017 int lun = 0;
3022 3018 char *sas_wwn = NULL;
3023 3019 int phynum = -1;
3024 3020 int reallen = 0;
3025 3021
3026 3022 /* Get the target num */
3027 3023 lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
3028 3024 LUN_PROP, 0);
3029 3025
3030 3026 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
3031 3027 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
3032 3028 /*
3033 3029 * Stick in the address of form "pPHY,LUN"
3034 3030 */
3035 3031 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
3036 3032 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
3037 3033 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
3038 3034 == DDI_PROP_SUCCESS) {
3039 3035 /*
3040 3036 * Stick in the address of the form "wWWN,LUN"
3041 3037 */
3042 3038 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
3043 3039 ddi_prop_free(sas_wwn);
3044 3040 } else {
3045 3041 return (DDI_FAILURE);
3046 3042 }
3047 3043
3048 3044 ASSERT(reallen < len);
3049 3045 if (reallen >= len) {
3050 3046 mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
3051 3047 "length too small, it needs to be %d bytes", reallen + 1);
3052 3048 }
3053 3049 return (DDI_SUCCESS);
3054 3050 }
3055 3051
3056 3052 /*
3057 3053 * tran_tgt_init(9E) - target device instance initialization
3058 3054 */
/*
 * tran_tgt_init(9E) implementation.
 *
 * Resolves the target's address (SAS WWN or phymask for the RAID virtual
 * port, plus LUN) from either MDI pathinfo properties (mpxio client) or
 * devinfo properties, looks the target up in m_targets, and attaches a
 * mptsas_tgt_private_t to the tran.  For SATA/ATAPI targets it also
 * fetches VPD page 0x89 (ATA IDENTIFY data) and overrides the SCSA
 * "inquiry-*" properties with the real vendor/product/firmware strings.
 *
 * Returns DDI_SUCCESS, DDI_FAILURE, or DDI_NOT_WELL_FORMED (lost
 * pathinfo node).
 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * Use this function to allocate target-private data structures,
	 * if needed by this HBA.  Add revised flow-control and queue
	 * properties for child here, if desired and if you can tell they
	 * support tagged queueing by now.
	 */
	mptsas_t *mpt;
	int lun = sd->sd_address.a_lun;
	mdi_pathinfo_t *pip = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	mptsas_target_t *ptgt = NULL;
	char *psas_wwn = NULL;
	mptsas_phymask_t phymask = 0;
	uint64_t sas_wwn = 0;
	mptsas_target_addr_t addr;
	mpt = SDEV2MPT(sd);

	/* tgt_init is only expected on iport nodes, never the raw HBA. */
	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	/*
	 * Non-persistent (.conf-created) nodes are merged into their
	 * persistent counterparts and rejected here.
	 */
	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		/* mpxio client path: address comes from pathinfo props. */
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs.  Somehow scsi_vhci has
			 * lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/* Direct child: address comes from devinfo props. */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}

	/* A target must be addressable by WWN or by phymask (RAID). */
	ASSERT((sas_wwn != 0) || (phymask != 0));
	addr.mta_wwn = sas_wwn;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	ptgt = refhash_lookup(mpt->m_targets, &addr);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	if (hba_tran->tran_tgt_private == NULL) {
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	/* The SATA inquiry fix-up below is only done for direct children. */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;

		/* Drop m_mutex: the inquiry below may block. */
		mutex_exit(&mpt->m_mutex);
		/*
		 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			/* Non-fatal: keep the default inquiry properties. */
			return (DDI_SUCCESS);
		}
		sid = (void *)(&inq89[60]);

		/* IDENTIFY strings are byte-swapped; swab into C strings. */
		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		sata_split_model(model, &vid, &pid);

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
3226 3222 /*
3227 3223 * tran_tgt_free(9E) - target device instance deallocation
3228 3224 */
3229 3225 static void
3230 3226 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3231 3227 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3232 3228 {
3233 3229 #ifndef __lock_lint
3234 3230 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
3235 3231 #endif
3236 3232
3237 3233 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
3238 3234
3239 3235 if (tgt_private != NULL) {
3240 3236 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
3241 3237 hba_tran->tran_tgt_private = NULL;
3242 3238 }
3243 3239 }
3244 3240
3245 3241 /*
3246 3242 * scsi_pkt handling
3247 3243 *
3248 3244 * Visible to the external world via the transport structure.
3249 3245 */
3250 3246
3251 3247 /*
3252 3248 * Notes:
3253 3249 * - transport the command to the addressed SCSI target/lun device
3254 3250 * - normal operation is to schedule the command to be transported,
3255 3251 * and return TRAN_ACCEPT if this is successful.
3256 3252 * - if NO_INTR, tran_start must poll device for command completion
3257 3253 */
/*
 * tran_start(9E) implementation: transport a command to the addressed
 * target/lun.  Prepares the packet, then either accepts it under m_mutex
 * or, when helper threads are active and m_mutex is contended, stages it
 * on the tx_waitq to be drained later (see the block comment below for
 * the full protocol).  Returns TRAN_ACCEPT, TRAN_BUSY, or
 * TRAN_FATAL_ERROR.
 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by other thread, don't spin to wait
	 * for it.  Instead, queue the cmd and next time when the instance
	 * lock is not held, accept all the queued cmd.  An extra tx_waitq
	 * is introduced to protect the queue.
	 *
	 * The polled cmd will not be queued and accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq.  An IO requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and while holding the
	 * tx_wait mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex.  This scheme ensures that
	 * the last cmd in a burst be processed.
	 *
	 * we enable this feature only when the helper threads are enabled,
	 * at which we think the loads are heavy.
	 *
	 * per instance mutex m_tx_waitq_mutex is introduced to protect the
	 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
	 */

	if (mpt->m_doneq_thread_n) {
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/* Polled commands must block for the instance lock. */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex.  In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * to stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/* Someone else is draining; just enqueue. */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* drain the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3400 3396
3401 3397 /*
3402 3398 * Accept all the queued cmds(if any) before accept the current one.
3403 3399 */
/*
 * Accept all the queued cmds(if any) before accepting the current one.
 * Called with m_mutex held.  Returns TRAN_ACCEPT, TRAN_BUSY, or
 * TRAN_FATAL_ERROR for the current command.
 */
static int
mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(mutex_owned(&mpt->m_mutex));
	/*
	 * The call to mptsas_accept_tx_waitq() must always be performed
	 * because that is where mpt->m_tx_draining is cleared.
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	mptsas_accept_tx_waitq(mpt);
	mutex_exit(&mpt->m_tx_waitq_mutex);
	/*
	 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
	 * in this case, m_mutex is acquired.
	 */
	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
			/*
			 * The command should be allowed to retry by returning
			 * TRAN_BUSY to stall the I/O's which come from
			 * scsi_vhci since the device/path is in unstable state
			 * now.
			 */
			return (TRAN_BUSY);
		} else {
			/*
			 * The device is offline, just fail the command by
			 * return TRAN_FATAL_ERROR.
			 */
			return (TRAN_FATAL_ERROR);
		}
	}
	rval = mptsas_accept_pkt(mpt, cmd);

	return (rval);
}
3443 3439
/*
 * Accept a prepared command for transport.  Called with m_mutex held.
 *
 * Handles throttle reset after draining, rejects or requeues commands
 * for targets whose DevHandle is invalid (failing differently depending
 * on whether the HBA is mid-reset), and either starts the command
 * immediately in a free slot or places it on the wait queue.  Polled
 * (FLAG_NOINTR) commands are run to completion here.  Returns
 * TRAN_ACCEPT, TRAN_BUSY, or TRAN_FATAL_ERROR.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Commands routed in via the tx_waitq may not be prepared yet. */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached.  Check if being reset and if DevHandle is being
	 * re-initialized.  If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			/* tx_waitq commands complete through the doneq. */
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If device handle has already been invalidated, just
	 * fail the command.  In theory, command from scsi_vhci
	 * client is impossible send down command with invalid
	 * devhdl since devhdl is set after path offline, target
	 * driver is not suppose to select a offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG3(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case.  mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3547 3543
/*
 * Claim a free command slot (SMID) for cmd using a rotor over the normal
 * slots.  Called with m_mutex held.  Returns TRUE with cmd installed in
 * the slot table (and per-target/instance counts updated) or FALSE if
 * every normal slot is occupied.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t	*slots = mpt->m_active;
	uint_t		slot, start_rotor;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));

	/*
	 * Find the next available slot, beginning at m_rotor.  If no slot is
	 * available, we'll return FALSE to indicate that.  This mechanism
	 * considers only the normal slots, not the reserved slot 0 nor the
	 * task management slot m_n_normal + 1.  The rotor is left to point to
	 * the normal slot after the one we select, unless we select the last
	 * normal slot in which case it returns to slot 1.
	 */
	start_rotor = slots->m_rotor;
	do {
		slot = slots->m_rotor++;
		if (slots->m_rotor > slots->m_n_normal)
			slots->m_rotor = 1;

		/* Wrapped all the way around: table is full. */
		if (slots->m_rotor == start_rotor)
			break;
	} while (slots->m_slot[slot] != NULL);

	if (slots->m_slot[slot] != NULL)
		return (FALSE);

	ASSERT(slot != 0 && slot <= slots->m_n_normal);

	cmd->cmd_slot = slot;
	slots->m_slot[slot] = cmd;
	mpt->m_ncmds++;

	/*
	 * only increment per target ncmds if this is not a
	 * command that has no target associated with it (i.e. a
	 * event acknoledgment)
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		/*
		 * Expiration time is set in mptsas_start_cmd
		 */
		ptgt->m_t_ncmds++;
		cmd->cmd_active_expiration = 0;
	} else {
		/*
		 * Initialize expiration time for passthrough commands,
		 */
		cmd->cmd_active_expiration = gethrtime() +
		    (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
	}
	return (TRUE);
}
3609 3605
3610 3606 /*
3611 3607 * prepare the pkt:
3612 3608 * the pkt may have been resubmitted or just reused so
3613 3609 * initialize some fields and do some checks.
3614 3610 */
3615 3611 static int
3616 3612 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3617 3613 {
3618 3614 struct scsi_pkt *pkt = CMD2PKT(cmd);
3619 3615
3620 3616 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3621 3617
3622 3618 /*
3623 3619 * Reinitialize some fields that need it; the packet may
3624 3620 * have been resubmitted
3625 3621 */
3626 3622 pkt->pkt_reason = CMD_CMPLT;
3627 3623 pkt->pkt_state = 0;
3628 3624 pkt->pkt_statistics = 0;
3629 3625 pkt->pkt_resid = 0;
3630 3626 cmd->cmd_age = 0;
3631 3627 cmd->cmd_pkt_flags = pkt->pkt_flags;
3632 3628
3633 3629 /*
3634 3630 * zero status byte.
3635 3631 */
3636 3632 *(pkt->pkt_scbp) = 0;
3637 3633
3638 3634 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3639 3635 pkt->pkt_resid = cmd->cmd_dmacount;
3640 3636
3641 3637 /*
3642 3638 * consistent packets need to be sync'ed first
3643 3639 * (only for data going out)
3644 3640 */
3645 3641 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3646 3642 (cmd->cmd_flags & CFLAG_DMASEND)) {
3647 3643 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3648 3644 DDI_DMA_SYNC_FORDEV);
3649 3645 }
3650 3646 }
3651 3647
3652 3648 cmd->cmd_flags =
3653 3649 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3654 3650 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3655 3651
3656 3652 return (TRAN_ACCEPT);
3657 3653 }
3658 3654
/*
 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
 *
 * One of three possibilities:
 *	- allocate scsi_pkt
 *	- allocate scsi_pkt and DMA resources
 *	- allocate DMA resources to an already-allocated pkt
 */
static struct scsi_pkt *
mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	mptsas_cmd_t		*cmd, *new_cmd;
	mptsas_t		*mpt = ADDR2MPT(ap);
	uint_t			oldcookiec;
	mptsas_target_t		*ptgt = NULL;
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			kf;

	/* map the caller's blocking preference onto kmem flags */
	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	if (tgt_private == NULL) {
		return (NULL);
	}
	ptgt = tgt_private->t_private;
	ASSERT(ptgt != NULL);
	if (ptgt == NULL)
		return (NULL);
	/* refresh the address with the target's current devhdl/lun */
	ap->a_target = ptgt->m_devhdl;
	ap->a_lun = tgt_private->t_lun;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
#ifdef MPTSAS_TEST_EXTRN_ALLOC
	statuslen *= 100; tgtlen *= 4;
#endif
	NDBG3(("mptsas_scsi_init_pkt:\n"
	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
	    ap->a_target, (void *)pkt, (void *)bp,
	    cmdlen, statuslen, tgtlen, flags));

	/*
	 * Allocate the new packet.
	 */
	if (pkt == NULL) {
		ddi_dma_handle_t save_dma_handle;

		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
		if (cmd == NULL)
			return (NULL);

		/*
		 * The cache constructor pre-allocated the DMA handle;
		 * preserve it across the bzero of the whole object.
		 */
		save_dma_handle = cmd->cmd_dmahandle;
		bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
		cmd->cmd_dmahandle = save_dma_handle;

		/* the scsi_pkt is co-allocated directly after the cmd */
		pkt = (void *)((uchar_t *)cmd +
		    sizeof (struct mptsas_cmd));
		pkt->pkt_ha_private = (opaque_t)cmd;
		pkt->pkt_address = *ap;
		pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
		pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
		pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
		cmd->cmd_pkt = (struct scsi_pkt *)pkt;
		cmd->cmd_cdblen = (uchar_t)cmdlen;
		cmd->cmd_scblen = statuslen;
		cmd->cmd_rqslen = SENSE_LENGTH;
		cmd->cmd_tgt_addr = ptgt;

		/*
		 * Oversized cdb/private/status areas cannot live in the
		 * embedded buffers and need external allocations.
		 */
		if ((cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			int failure;

			/*
			 * We are going to allocate external packet space which
			 * might include the sense data buffer for DMA so we
			 * need to increase the reference counter here. In a
			 * case the HBA is in reset we just simply free the
			 * allocated packet and bail out.
			 */
			mutex_enter(&mpt->m_mutex);
			if (mpt->m_in_reset) {
				mutex_exit(&mpt->m_mutex);

				cmd->cmd_flags = CFLAG_FREE;
				kmem_cache_free(mpt->m_kmem_cache, cmd);
				return (NULL);
			}
			mpt->m_extreq_sense_refcount++;
			ASSERT(mpt->m_extreq_sense_refcount > 0);
			mutex_exit(&mpt->m_mutex);

			/*
			 * if extern alloc fails, all will be
			 * deallocated, including cmd
			 */
			failure = mptsas_pkt_alloc_extern(mpt, cmd,
			    cmdlen, tgtlen, statuslen, kf);

			if (failure != 0 || cmd->cmd_extrqslen == 0) {
				/*
				 * If the external packet space allocation
				 * failed, or we didn't allocate the sense
				 * data buffer for DMA we need to decrease the
				 * reference counter.
				 */
				mutex_enter(&mpt->m_mutex);
				ASSERT(mpt->m_extreq_sense_refcount > 0);
				mpt->m_extreq_sense_refcount--;
				if (mpt->m_extreq_sense_refcount == 0)
					cv_broadcast(
					    &mpt->m_extreq_sense_refcount_cv);
				mutex_exit(&mpt->m_mutex);

				if (failure != 0) {
					/*
					 * if extern allocation fails, it will
					 * deallocate the new pkt as well
					 */
					return (NULL);
				}
			}
		}
		new_cmd = cmd;

	} else {
		/* caller supplied the packet; only DMA setup remains */
		cmd = PKT2CMD(pkt);
		new_cmd = NULL;
	}


	/* grab cmd->cmd_cookiec here as oldcookiec */

	oldcookiec = cmd->cmd_cookiec;

	/*
	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
	 * greater than 0 and we'll need to grab the next dma window
	 */
	/*
	 * SLM-not doing extra command frame right now; may add later
	 */

	if (cmd->cmd_nwin > 0) {

		/*
		 * Make sure we havn't gone past the the total number
		 * of windows
		 */
		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
			return (NULL);
		}
		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
			return (NULL);
		}
		goto get_dma_cookies;
	}


	if (flags & PKT_XARQ) {
		cmd->cmd_flags |= CFLAG_XARQ;
	}

	/*
	 * DMA resource allocation. This version assumes your
	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
	 */
	if (bp && (bp->b_bcount != 0) &&
	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {

		int	cnt, dma_flags;
		mptti_t	*dmap;		/* ptr to the S/G list */

		/*
		 * Set up DMA memory and position to the next DMA segment.
		 */
		ASSERT(cmd->cmd_dmahandle != NULL);

		if (bp->b_flags & B_READ) {
			dma_flags = DDI_DMA_READ;
			cmd->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			dma_flags = DDI_DMA_WRITE;
			cmd->cmd_flags |= CFLAG_DMASEND;
		}
		if (flags & PKT_CONSISTENT) {
			cmd->cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		if (flags & PKT_DMA_PARTIAL) {
			dma_flags |= DDI_DMA_PARTIAL;
		}

		/*
		 * workaround for byte hole issue on psycho and
		 * schizo pre 2.1
		 */
		if ((bp->b_flags & B_READ) && ((bp->b_flags &
		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
		    dma_flags, callback, arg,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
		if (rval == DDI_DMA_PARTIAL_MAP) {
			/* partial mapping: start from the first DMA window */
			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
			    &cmd->cmd_cookiec);
		} else if (rval && (rval != DDI_DMA_MAPPED)) {
			/* translate the bind failure into a buf error */
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->cmd_flags &= ~CFLAG_DMAVALID;
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

	get_dma_cookies:
		cmd->cmd_flags |= CFLAG_DMAVALID;
		ASSERT(cmd->cmd_cookiec > 0);

		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
			    cmd->cmd_cookiec);
			bioerror(bp, EINVAL);
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

		/*
		 * Allocate extra SGL buffer if needed.
		 */
		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
		    (cmd->cmd_extra_frames == NULL)) {
			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
			    DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
				    "failed");
				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		/*
		 * Always use scatter-gather transfer
		 * Use the loop below to store physical addresses of
		 * DMA segments, from the DMA cookies, into your HBA's
		 * scatter-gather list.
		 * We need to ensure we have enough kmem alloc'd
		 * for the sg entries since we are no longer using an
		 * array inside mptsas_cmd_t.
		 *
		 * We check cmd->cmd_cookiec against oldcookiec so
		 * the scatter-gather list is correctly allocated
		 */

		if (oldcookiec != cmd->cmd_cookiec) {
			if (cmd->cmd_sg != (mptti_t *)NULL) {
				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
				    oldcookiec);
				cmd->cmd_sg = NULL;
			}
		}

		if (cmd->cmd_sg == (mptti_t *)NULL) {
			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
			    cmd->cmd_cookiec), kf);

			if (cmd->cmd_sg == (mptti_t *)NULL) {
				mptsas_log(mpt, CE_WARN,
				    "unable to kmem_alloc enough memory "
				    "for scatter/gather list");
		/*
		 * if we have an ENOMEM condition we need to behave
		 * the same way as the rest of this routine
		 */

				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		dmap = cmd->cmd_sg;

		ASSERT(cmd->cmd_cookie.dmac_size != 0);

		/*
		 * store the first segment into the S/G list
		 */
		dmap->count = cmd->cmd_cookie.dmac_size;
		dmap->addr.address64.Low = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
		dmap->addr.address64.High = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress >> 32);

		/*
		 * dmacount counts the size of the dma for this window
		 * (if partial dma is being used). totaldmacount
		 * keeps track of the total amount of dma we have
		 * transferred for all the windows (needed to calculate
		 * the resid value below).
		 */
		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

		/*
		 * We already stored the first DMA scatter gather segment,
		 * start at 1 if we need to store more.
		 */
		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
			/*
			 * Get next DMA cookie
			 */
			ddi_dma_nextcookie(cmd->cmd_dmahandle,
			    &cmd->cmd_cookie);
			dmap++;

			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

			/*
			 * store the segment parms into the S/G list
			 */
			dmap->count = cmd->cmd_cookie.dmac_size;
			dmap->addr.address64.Low = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
			dmap->addr.address64.High = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress >> 32);
		}

		/*
		 * If this was partially allocated we set the resid
		 * the amount of data NOT transferred in this window
		 * If there is only one window, the resid will be 0
		 */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
		NDBG3(("mptsas_scsi_init_pkt: cmd_dmacount=%d.",
		    cmd->cmd_dmacount));
	}
	return (pkt);
}
4035 4031
4036 4032 /*
4037 4033 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
4038 4034 *
4039 4035 * Notes:
4040 4036 * - also frees DMA resources if allocated
4041 4037 * - implicit DMA synchonization
4042 4038 */
4043 4039 static void
4044 4040 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4045 4041 {
4046 4042 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4047 4043 mptsas_t *mpt = ADDR2MPT(ap);
4048 4044
4049 4045 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
4050 4046 ap->a_target, (void *)pkt));
4051 4047
4052 4048 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4053 4049 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4054 4050 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4055 4051 }
4056 4052
4057 4053 if (cmd->cmd_sg) {
4058 4054 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
4059 4055 cmd->cmd_sg = NULL;
4060 4056 }
4061 4057
4062 4058 mptsas_free_extra_sgl_frame(mpt, cmd);
4063 4059
4064 4060 if ((cmd->cmd_flags &
4065 4061 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
4066 4062 CFLAG_SCBEXTERN)) == 0) {
4067 4063 cmd->cmd_flags = CFLAG_FREE;
4068 4064 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4069 4065 } else {
4070 4066 boolean_t extrqslen = cmd->cmd_extrqslen != 0;
4071 4067
4072 4068 mptsas_pkt_destroy_extern(mpt, cmd);
4073 4069
4074 4070 /*
4075 4071 * If the packet had the sense data buffer for DMA allocated we
4076 4072 * need to decrease the reference counter.
4077 4073 */
4078 4074 if (extrqslen) {
4079 4075 mutex_enter(&mpt->m_mutex);
4080 4076 ASSERT(mpt->m_extreq_sense_refcount > 0);
4081 4077 mpt->m_extreq_sense_refcount--;
4082 4078 if (mpt->m_extreq_sense_refcount == 0)
4083 4079 cv_broadcast(&mpt->m_extreq_sense_refcount_cv);
4084 4080 mutex_exit(&mpt->m_mutex);
4085 4081 }
4086 4082 }
4087 4083 }
4088 4084
4089 4085 /*
4090 4086 * kmem cache constructor and destructor:
4091 4087 * When constructing, we bzero the cmd and allocate the dma handle
4092 4088 * When destructing, just free the dma handle
4093 4089 */
4094 4090 static int
4095 4091 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
4096 4092 {
4097 4093 mptsas_cmd_t *cmd = buf;
4098 4094 mptsas_t *mpt = cdrarg;
4099 4095 int (*callback)(caddr_t);
4100 4096
4101 4097 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4102 4098
4103 4099 NDBG4(("mptsas_kmem_cache_constructor"));
4104 4100
4105 4101 /*
4106 4102 * allocate a dma handle
4107 4103 */
4108 4104 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
4109 4105 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
4110 4106 cmd->cmd_dmahandle = NULL;
4111 4107 return (-1);
4112 4108 }
4113 4109 return (0);
4114 4110 }
4115 4111
4116 4112 static void
4117 4113 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
4118 4114 {
4119 4115 #ifndef __lock_lint
4120 4116 _NOTE(ARGUNUSED(cdrarg))
4121 4117 #endif
4122 4118 mptsas_cmd_t *cmd = buf;
4123 4119
4124 4120 NDBG4(("mptsas_kmem_cache_destructor"));
4125 4121
4126 4122 if (cmd->cmd_dmahandle) {
4127 4123 ddi_dma_free_handle(&cmd->cmd_dmahandle);
4128 4124 cmd->cmd_dmahandle = NULL;
4129 4125 }
4130 4126 }
4131 4127
4132 4128 static int
4133 4129 mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
4134 4130 {
4135 4131 mptsas_cache_frames_t *p = buf;
4136 4132 mptsas_t *mpt = cdrarg;
4137 4133 ddi_dma_attr_t frame_dma_attr;
4138 4134 size_t mem_size, alloc_len;
4139 4135 ddi_dma_cookie_t cookie;
4140 4136 uint_t ncookie;
4141 4137 int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
4142 4138 ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4143 4139
4144 4140 frame_dma_attr = mpt->m_msg_dma_attr;
4145 4141 frame_dma_attr.dma_attr_align = 0x10;
4146 4142 frame_dma_attr.dma_attr_sgllen = 1;
4147 4143
4148 4144 if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
4149 4145 &p->m_dma_hdl) != DDI_SUCCESS) {
4150 4146 mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
4151 4147 " extra SGL.");
4152 4148 return (DDI_FAILURE);
4153 4149 }
4154 4150
4155 4151 mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;
4156 4152
4157 4153 if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
4158 4154 DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
4159 4155 &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
4160 4156 ddi_dma_free_handle(&p->m_dma_hdl);
4161 4157 p->m_dma_hdl = NULL;
4162 4158 mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
4163 4159 " extra SGL.");
4164 4160 return (DDI_FAILURE);
4165 4161 }
4166 4162
4167 4163 if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
4168 4164 alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
4169 4165 &cookie, &ncookie) != DDI_DMA_MAPPED) {
4170 4166 (void) ddi_dma_mem_free(&p->m_acc_hdl);
4171 4167 ddi_dma_free_handle(&p->m_dma_hdl);
4172 4168 p->m_dma_hdl = NULL;
4173 4169 mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
4174 4170 " extra SGL");
4175 4171 return (DDI_FAILURE);
4176 4172 }
4177 4173
4178 4174 /*
4179 4175 * Store the SGL memory address. This chip uses this
4180 4176 * address to dma to and from the driver. The second
4181 4177 * address is the address mpt uses to fill in the SGL.
4182 4178 */
4183 4179 p->m_phys_addr = cookie.dmac_laddress;
4184 4180
4185 4181 return (DDI_SUCCESS);
4186 4182 }
4187 4183
4188 4184 static void
4189 4185 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
4190 4186 {
4191 4187 #ifndef __lock_lint
4192 4188 _NOTE(ARGUNUSED(cdrarg))
4193 4189 #endif
4194 4190 mptsas_cache_frames_t *p = buf;
4195 4191 if (p->m_dma_hdl != NULL) {
4196 4192 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
4197 4193 (void) ddi_dma_mem_free(&p->m_acc_hdl);
4198 4194 ddi_dma_free_handle(&p->m_dma_hdl);
4199 4195 p->m_phys_addr = NULL;
4200 4196 p->m_frames_addr = NULL;
4201 4197 p->m_dma_hdl = NULL;
4202 4198 p->m_acc_hdl = NULL;
4203 4199 }
4204 4200
4205 4201 }
4206 4202
4207 4203 /*
4208 4204 * Figure out if we need to use a different method for the request
4209 4205 * sense buffer and allocate from the map if necessary.
4210 4206 */
4211 4207 static boolean_t
4212 4208 mptsas_cmdarqsize(mptsas_t *mpt, mptsas_cmd_t *cmd, size_t senselength, int kf)
4213 4209 {
4214 4210 if (senselength > mpt->m_req_sense_size) {
4215 4211 unsigned long i;
4216 4212
4217 4213 /* Sense length is limited to an 8 bit value in MPI Spec. */
4218 4214 if (senselength > 255)
4219 4215 senselength = 255;
4220 4216 cmd->cmd_extrqschunks = (senselength +
4221 4217 (mpt->m_req_sense_size - 1))/mpt->m_req_sense_size;
4222 4218 i = (kf == KM_SLEEP ? rmalloc_wait : rmalloc)
4223 4219 (mpt->m_erqsense_map, cmd->cmd_extrqschunks);
4224 4220
4225 4221 if (i == 0)
4226 4222 return (B_FALSE);
4227 4223
4228 4224 cmd->cmd_extrqslen = (uint16_t)senselength;
4229 4225 cmd->cmd_extrqsidx = i - 1;
4230 4226 cmd->cmd_arq_buf = mpt->m_extreq_sense +
4231 4227 (cmd->cmd_extrqsidx * mpt->m_req_sense_size);
4232 4228 } else {
4233 4229 cmd->cmd_rqslen = (uchar_t)senselength;
4234 4230 }
4235 4231
4236 4232 return (B_TRUE);
4237 4233 }
4238 4234
4239 4235 /*
4240 4236 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
4241 4237 * for non-standard length cdb, pkt_private, status areas
4242 4238 * if allocation fails, then deallocate all external space and the pkt
4243 4239 */
4244 4240 /* ARGSUSED */
4245 4241 static int
4246 4242 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
4247 4243 int cmdlen, int tgtlen, int statuslen, int kf)
4248 4244 {
4249 4245 caddr_t cdbp, scbp, tgt;
4250 4246
4251 4247 NDBG3(("mptsas_pkt_alloc_extern: "
4252 4248 "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
4253 4249 (void *)cmd, cmdlen, tgtlen, statuslen, kf));
4254 4250
4255 4251 tgt = cdbp = scbp = NULL;
4256 4252 cmd->cmd_scblen = statuslen;
4257 4253 cmd->cmd_privlen = (uchar_t)tgtlen;
4258 4254
4259 4255 if (cmdlen > sizeof (cmd->cmd_cdb)) {
4260 4256 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
4261 4257 goto fail;
4262 4258 }
4263 4259 cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
4264 4260 cmd->cmd_flags |= CFLAG_CDBEXTERN;
4265 4261 }
4266 4262 if (tgtlen > PKT_PRIV_LEN) {
4267 4263 if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
4268 4264 goto fail;
4269 4265 }
4270 4266 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
4271 4267 cmd->cmd_pkt->pkt_private = tgt;
4272 4268 }
4273 4269 if (statuslen > EXTCMDS_STATUS_SIZE) {
4274 4270 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
4275 4271 goto fail;
4276 4272 }
4277 4273 cmd->cmd_flags |= CFLAG_SCBEXTERN;
4278 4274 cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
4279 4275
4280 4276 /* allocate sense data buf for DMA */
4281 4277 if (mptsas_cmdarqsize(mpt, cmd, statuslen -
4282 4278 MPTSAS_GET_ITEM_OFF(struct scsi_arq_status, sts_sensedata),
4283 4279 kf) == B_FALSE)
4284 4280 goto fail;
4285 4281 }
4286 4282 return (0);
4287 4283 fail:
4288 4284 mptsas_pkt_destroy_extern(mpt, cmd);
4289 4285 return (1);
4290 4286 }
4291 4287
4292 4288 /*
4293 4289 * deallocate external pkt space and deallocate the pkt
4294 4290 */
4295 4291 static void
4296 4292 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4297 4293 {
4298 4294 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4299 4295
4300 4296 if (cmd->cmd_flags & CFLAG_FREE) {
4301 4297 mptsas_log(mpt, CE_PANIC,
4302 4298 "mptsas_pkt_destroy_extern: freeing free packet");
4303 4299 _NOTE(NOT_REACHED)
4304 4300 /* NOTREACHED */
4305 4301 }
4306 4302 if (cmd->cmd_extrqslen != 0) {
4307 4303 rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
4308 4304 cmd->cmd_extrqsidx + 1);
4309 4305 }
4310 4306 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4311 4307 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4312 4308 }
4313 4309 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4314 4310 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4315 4311 }
4316 4312 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4317 4313 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4318 4314 }
4319 4315 cmd->cmd_flags = CFLAG_FREE;
4320 4316 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4321 4317 }
4322 4318
4323 4319 /*
4324 4320 * tran_sync_pkt(9E) - explicit DMA synchronization
4325 4321 */
4326 4322 /*ARGSUSED*/
4327 4323 static void
4328 4324 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4329 4325 {
4330 4326 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4331 4327
4332 4328 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4333 4329 ap->a_target, (void *)pkt));
4334 4330
4335 4331 if (cmd->cmd_dmahandle) {
4336 4332 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4337 4333 (cmd->cmd_flags & CFLAG_DMASEND) ?
4338 4334 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4339 4335 }
4340 4336 }
4341 4337
4342 4338 /*
4343 4339 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4344 4340 */
4345 4341 /*ARGSUSED*/
4346 4342 static void
4347 4343 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4348 4344 {
4349 4345 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4350 4346 mptsas_t *mpt = ADDR2MPT(ap);
4351 4347
4352 4348 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4353 4349 ap->a_target, (void *)pkt));
4354 4350
4355 4351 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4356 4352 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4357 4353 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4358 4354 }
4359 4355
4360 4356 mptsas_free_extra_sgl_frame(mpt, cmd);
4361 4357 }
4362 4358
4363 4359 static void
4364 4360 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4365 4361 {
4366 4362 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4367 4363 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4368 4364 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4369 4365 DDI_DMA_SYNC_FORCPU);
4370 4366 }
4371 4367 (*pkt->pkt_comp)(pkt);
4372 4368 }
4373 4369
4374 4370 static void
4375 4371 mptsas_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4376 4372 ddi_acc_handle_t acc_hdl, uint_t cookiec, uint32_t end_flags)
4377 4373 {
4378 4374 pMpi2SGESimple64_t sge;
4379 4375 mptti_t *dmap;
4380 4376 uint32_t flags;
4381 4377
4382 4378 dmap = cmd->cmd_sg;
4383 4379
4384 4380 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4385 4381 while (cookiec--) {
4386 4382 ddi_put32(acc_hdl,
4387 4383 &sge->Address.Low, dmap->addr.address64.Low);
4388 4384 ddi_put32(acc_hdl,
4389 4385 &sge->Address.High, dmap->addr.address64.High);
4390 4386 ddi_put32(acc_hdl, &sge->FlagsLength,
4391 4387 dmap->count);
4392 4388 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4393 4389 flags |= ((uint32_t)
4394 4390 (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4395 4391 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4396 4392 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4397 4393 MPI2_SGE_FLAGS_SHIFT);
4398 4394
4399 4395 /*
4400 4396 * If this is the last cookie, we set the flags
4401 4397 * to indicate so
4402 4398 */
4403 4399 if (cookiec == 0) {
4404 4400 flags |= end_flags;
4405 4401 }
4406 4402 if (cmd->cmd_flags & CFLAG_DMASEND) {
4407 4403 flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4408 4404 MPI2_SGE_FLAGS_SHIFT);
4409 4405 } else {
4410 4406 flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4411 4407 MPI2_SGE_FLAGS_SHIFT);
4412 4408 }
4413 4409 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4414 4410 dmap++;
4415 4411 sge++;
4416 4412 }
4417 4413 }
4418 4414
4419 4415 static void
4420 4416 mptsas_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
4421 4417 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4422 4418 {
4423 4419 pMpi2SGESimple64_t sge;
4424 4420 pMpi2SGEChain64_t sgechain;
4425 4421 uint64_t nframe_phys_addr;
4426 4422 uint_t cookiec;
4427 4423 mptti_t *dmap;
4428 4424 uint32_t flags;
4429 4425
4430 4426 /*
4431 4427 * Save the number of entries in the DMA
4432 4428 * Scatter/Gather list
4433 4429 */
4434 4430 cookiec = cmd->cmd_cookiec;
4435 4431
4436 4432 /*
4437 4433 * Hereby we start to deal with multiple frames.
4438 4434 * The process is as follows:
4439 4435 * 1. Determine how many frames are needed for SGL element
4440 4436 * storage; Note that all frames are stored in contiguous
4441 4437 * memory space and in 64-bit DMA mode each element is
4442 4438 * 3 double-words (12 bytes) long.
4443 4439 * 2. Fill up the main frame. We need to do this separately
4444 4440 * since it contains the SCSI IO request header and needs
4445 4441 * dedicated processing. Note that the last 4 double-words
4446 4442 * of the SCSI IO header is for SGL element storage
4447 4443 * (MPI2_SGE_IO_UNION).
4448 4444 * 3. Fill the chain element in the main frame, so the DMA
4449 4445 * engine can use the following frames.
4450 4446 * 4. Enter a loop to fill the remaining frames. Note that the
4451 4447 * last frame contains no chain element. The remaining
4452 4448 * frames go into the mpt SGL buffer allocated on the fly,
4453 4449 * not immediately following the main message frame, as in
4454 4450 * Gen1.
4455 4451 * Some restrictions:
4456 4452 * 1. For 64-bit DMA, the simple element and chain element
4457 4453 * are both of 3 double-words (12 bytes) in size, even
4458 4454 * though all frames are stored in the first 4G of mem
4459 4455 * range and the higher 32-bits of the address are always 0.
4460 4456 * 2. On some controllers (like the 1064/1068), a frame can
4461 4457 * hold SGL elements with the last 1 or 2 double-words
4462 4458 * (4 or 8 bytes) un-used. On these controllers, we should
4463 4459 * recognize that there's not enough room for another SGL
4464 4460 * element and move the sge pointer to the next frame.
4465 4461 */
4466 4462 int i, j, k, l, frames, sgemax;
4467 4463 int temp;
4468 4464 uint8_t chainflags;
4469 4465 uint16_t chainlength;
4470 4466 mptsas_cache_frames_t *p;
4471 4467
4472 4468 /*
4473 4469 * Sgemax is the number of SGE's that will fit
4474 4470 * each extra frame and frames is total
4475 4471 * number of frames we'll need. 1 sge entry per
4476 4472 * frame is reseverd for the chain element thus the -1 below.
4477 4473 */
4478 4474 sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
4479 4475 - 1);
4480 4476 temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4481 4477
4482 4478 /*
4483 4479 * A little check to see if we need to round up the number
4484 4480 * of frames we need
4485 4481 */
4486 4482 if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4487 4483 sgemax) > 1) {
4488 4484 frames = (temp + 1);
4489 4485 } else {
4490 4486 frames = temp;
4491 4487 }
4492 4488 dmap = cmd->cmd_sg;
4493 4489 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4494 4490
4495 4491 /*
4496 4492 * First fill in the main frame
4497 4493 */
4498 4494 j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
4499 4495 mptsas_sge_mainframe(cmd, frame, acc_hdl, j,
4500 4496 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4501 4497 MPI2_SGE_FLAGS_SHIFT));
4502 4498 dmap += j;
4503 4499 sge += j;
4504 4500 j++;
4505 4501
4506 4502 /*
4507 4503 * Fill in the chain element in the main frame.
4508 4504 * About calculation on ChainOffset:
4509 4505 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4510 4506 * in the end reserved for SGL element storage
4511 4507 * (MPI2_SGE_IO_UNION); we should count it in our
4512 4508 * calculation. See its definition in the header file.
4513 4509 * 2. Constant j is the counter of the current SGL element
4514 4510 * that will be processed, and (j - 1) is the number of
4515 4511 * SGL elements that have been processed (stored in the
4516 4512 * main frame).
4517 4513 * 3. ChainOffset value should be in units of double-words (4
4518 4514 * bytes) so the last value should be divided by 4.
4519 4515 */
4520 4516 ddi_put8(acc_hdl, &frame->ChainOffset,
4521 4517 (sizeof (MPI2_SCSI_IO_REQUEST) -
4522 4518 sizeof (MPI2_SGE_IO_UNION) +
4523 4519 (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4524 4520 sgechain = (pMpi2SGEChain64_t)sge;
4525 4521 chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4526 4522 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4527 4523 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4528 4524 ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4529 4525
4530 4526 /*
4531 4527 * The size of the next frame is the accurate size of space
4532 4528 * (in bytes) used to store the SGL elements. j is the counter
4533 4529 * of SGL elements. (j - 1) is the number of SGL elements that
4534 4530 * have been processed (stored in frames).
4535 4531 */
4536 4532 if (frames >= 2) {
4537 4533 ASSERT(mpt->m_req_frame_size >= sizeof (MPI2_SGE_SIMPLE64));
4538 4534 chainlength = mpt->m_req_frame_size /
4539 4535 sizeof (MPI2_SGE_SIMPLE64) *
4540 4536 sizeof (MPI2_SGE_SIMPLE64);
4541 4537 } else {
4542 4538 chainlength = ((cookiec - (j - 1)) *
4543 4539 sizeof (MPI2_SGE_SIMPLE64));
4544 4540 }
4545 4541
4546 4542 p = cmd->cmd_extra_frames;
4547 4543
4548 4544 ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4549 4545 ddi_put32(acc_hdl, &sgechain->Address.Low, p->m_phys_addr);
4550 4546 ddi_put32(acc_hdl, &sgechain->Address.High, p->m_phys_addr >> 32);
4551 4547
4552 4548 /*
4553 4549 * If there are more than 2 frames left we have to
4554 4550 * fill in the next chain offset to the location of
4555 4551 * the chain element in the next frame.
4556 4552 * sgemax is the number of simple elements in an extra
4557 4553 * frame. Note that the value NextChainOffset should be
4558 4554 * in double-words (4 bytes).
4559 4555 */
4560 4556 if (frames >= 2) {
4561 4557 ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4562 4558 (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4563 4559 } else {
4564 4560 ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4565 4561 }
4566 4562
4567 4563 /*
4568 4564 * Jump to next frame;
4569 4565 * Starting here, chain buffers go into the per command SGL.
4570 4566 * This buffer is allocated when chain buffers are needed.
4571 4567 */
4572 4568 sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4573 4569 i = cookiec;
4574 4570
4575 4571 /*
4576 4572 * Start filling in frames with SGE's. If we
4577 4573 * reach the end of frame and still have SGE's
4578 4574 * to fill we need to add a chain element and
4579 4575 * use another frame. j will be our counter
4580 4576 * for what cookie we are at and i will be
4581 4577 * the total cookiec. k is the current frame
4582 4578 */
4583 4579 for (k = 1; k <= frames; k++) {
4584 4580 for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4585 4581
4586 4582 /*
4587 4583 * If we have reached the end of frame
4588 4584 * and we have more SGE's to fill in
4589 4585 * we have to fill the final entry
4590 4586 * with a chain element and then
4591 4587 * continue to the next frame
4592 4588 */
4593 4589 if ((l == (sgemax + 1)) && (k != frames)) {
4594 4590 sgechain = (pMpi2SGEChain64_t)sge;
4595 4591 j--;
4596 4592 chainflags = (
4597 4593 MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4598 4594 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4599 4595 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4600 4596 ddi_put8(p->m_acc_hdl,
4601 4597 &sgechain->Flags, chainflags);
4602 4598 /*
4603 4599 * k is the frame counter and (k + 1)
4604 4600 * is the number of the next frame.
4605 4601 * Note that frames are in contiguous
4606 4602 * memory space.
4607 4603 */
4608 4604 nframe_phys_addr = p->m_phys_addr +
4609 4605 (mpt->m_req_frame_size * k);
4610 4606 ddi_put32(p->m_acc_hdl,
4611 4607 &sgechain->Address.Low,
4612 4608 nframe_phys_addr);
4613 4609 ddi_put32(p->m_acc_hdl,
4614 4610 &sgechain->Address.High,
4615 4611 nframe_phys_addr >> 32);
4616 4612
4617 4613 /*
4618 4614 * If there are more than 2 frames left
4619 4615 * we have to next chain offset to
4620 4616 * the location of the chain element
4621 4617 * in the next frame and fill in the
4622 4618 * length of the next chain
4623 4619 */
4624 4620 if ((frames - k) >= 2) {
4625 4621 ddi_put8(p->m_acc_hdl,
4626 4622 &sgechain->NextChainOffset,
4627 4623 (sgemax *
4628 4624 sizeof (MPI2_SGE_SIMPLE64))
4629 4625 >> 2);
4630 4626 ddi_put16(p->m_acc_hdl,
4631 4627 &sgechain->Length,
4632 4628 mpt->m_req_frame_size /
4633 4629 sizeof (MPI2_SGE_SIMPLE64) *
4634 4630 sizeof (MPI2_SGE_SIMPLE64));
4635 4631 } else {
4636 4632 /*
4637 4633 * This is the last frame. Set
4638 4634 * the NextChainOffset to 0 and
4639 4635 * Length is the total size of
4640 4636 * all remaining simple elements
4641 4637 */
4642 4638 ddi_put8(p->m_acc_hdl,
4643 4639 &sgechain->NextChainOffset,
4644 4640 0);
4645 4641 ddi_put16(p->m_acc_hdl,
4646 4642 &sgechain->Length,
4647 4643 (cookiec - j) *
4648 4644 sizeof (MPI2_SGE_SIMPLE64));
4649 4645 }
4650 4646
4651 4647 /* Jump to the next frame */
4652 4648 sge = (pMpi2SGESimple64_t)
4653 4649 ((char *)p->m_frames_addr +
4654 4650 (int)mpt->m_req_frame_size * k);
4655 4651
4656 4652 continue;
4657 4653 }
4658 4654
4659 4655 ddi_put32(p->m_acc_hdl,
4660 4656 &sge->Address.Low,
4661 4657 dmap->addr.address64.Low);
4662 4658 ddi_put32(p->m_acc_hdl,
4663 4659 &sge->Address.High,
4664 4660 dmap->addr.address64.High);
4665 4661 ddi_put32(p->m_acc_hdl,
4666 4662 &sge->FlagsLength, dmap->count);
4667 4663 flags = ddi_get32(p->m_acc_hdl,
4668 4664 &sge->FlagsLength);
4669 4665 flags |= ((uint32_t)(
4670 4666 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4671 4667 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4672 4668 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4673 4669 MPI2_SGE_FLAGS_SHIFT);
4674 4670
4675 4671 /*
4676 4672 * If we are at the end of the frame and
4677 4673 * there is another frame to fill in
4678 4674 * we set the last simple element as last
4679 4675 * element
4680 4676 */
4681 4677 if ((l == sgemax) && (k != frames)) {
4682 4678 flags |= ((uint32_t)
4683 4679 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4684 4680 MPI2_SGE_FLAGS_SHIFT);
4685 4681 }
4686 4682
4687 4683 /*
4688 4684 * If this is the final cookie we
4689 4685 * indicate it by setting the flags
4690 4686 */
4691 4687 if (j == i) {
4692 4688 flags |= ((uint32_t)
4693 4689 (MPI2_SGE_FLAGS_LAST_ELEMENT |
4694 4690 MPI2_SGE_FLAGS_END_OF_BUFFER |
4695 4691 MPI2_SGE_FLAGS_END_OF_LIST) <<
4696 4692 MPI2_SGE_FLAGS_SHIFT);
4697 4693 }
4698 4694 if (cmd->cmd_flags & CFLAG_DMASEND) {
4699 4695 flags |=
4700 4696 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4701 4697 MPI2_SGE_FLAGS_SHIFT);
4702 4698 } else {
4703 4699 flags |=
4704 4700 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4705 4701 MPI2_SGE_FLAGS_SHIFT);
4706 4702 }
4707 4703 ddi_put32(p->m_acc_hdl,
4708 4704 &sge->FlagsLength, flags);
4709 4705 dmap++;
4710 4706 sge++;
4711 4707 }
4712 4708 }
4713 4709
4714 4710 /*
4715 4711 * Sync DMA with the chain buffers that were just created
4716 4712 */
4717 4713 (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4718 4714 }
4719 4715
4720 4716 static void
4721 4717 mptsas_ieee_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4722 4718 ddi_acc_handle_t acc_hdl, uint_t cookiec, uint8_t end_flag)
4723 4719 {
4724 4720 pMpi2IeeeSgeSimple64_t ieeesge;
4725 4721 mptti_t *dmap;
4726 4722 uint8_t flags;
4727 4723
4728 4724 dmap = cmd->cmd_sg;
4729 4725
4730 4726 NDBG1(("mptsas_ieee_sge_mainframe: cookiec=%d, %s", cookiec,
4731 4727 cmd->cmd_flags & CFLAG_DMASEND?"Out":"In"));
4732 4728
4733 4729 ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
4734 4730 while (cookiec--) {
4735 4731 ddi_put32(acc_hdl,
4736 4732 &ieeesge->Address.Low, dmap->addr.address64.Low);
4737 4733 ddi_put32(acc_hdl,
4738 4734 &ieeesge->Address.High, dmap->addr.address64.High);
4739 4735 ddi_put32(acc_hdl, &ieeesge->Length,
4740 4736 dmap->count);
4741 4737 NDBG1(("mptsas_ieee_sge_mainframe: len=%d", dmap->count));
4742 4738 flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
4743 4739 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4744 4740
4745 4741 /*
4746 4742 * If this is the last cookie, we set the flags
4747 4743 * to indicate so
4748 4744 */
4749 4745 if (cookiec == 0) {
4750 4746 flags |= end_flag;
4751 4747 }
4752 4748
4753 4749 ddi_put8(acc_hdl, &ieeesge->Flags, flags);
4754 4750 dmap++;
4755 4751 ieeesge++;
4756 4752 }
4757 4753 }
4758 4754
/*
 * Build an MPI2.5 IEEE SGL that does not fit in the main request frame:
 * fill the main frame, append a chain element pointing at the per-command
 * extra-frame buffer, then fill that buffer frame by frame, chaining each
 * frame to the next until all DMA cookies are consumed.
 */
static void
mptsas_ieee_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2IeeeSgeSimple64_t	ieeesge;
	pMpi25IeeeSgeChain64_t	ieeesgechain;
	uint64_t		nframe_phys_addr;
	uint_t			cookiec;
	mptti_t			*dmap;
	uint8_t			flags;

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_ieee_sge_chain: cookiec=%d", cookiec));

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    4 double-words (16 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Restrictions:
	 *    For 64-bit DMA, the simple element and chain element
	 *    are both of 4 double-words (16 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 */
	int i, j, k, l, frames, sgemax;
	int temp;
	uint8_t chainflags;
	uint32_t chainlength;
	mptsas_cache_frames_t *p;

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reserved for the chain element thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_IEEE_SGE_SIMPLE64))
	    - 1);
	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
	    sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	NDBG1(("mptsas_ieee_sge_chain: temp=%d, frames=%d", temp, frames));
	dmap = cmd->cmd_sg;
	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame.  No end flag here: the list
	 * continues into the chained frames below.
	 */
	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
	mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl, j, 0);
	dmap += j;
	ieeesge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of quad-words (16
	 *    bytes) so the last value should be divided by 16.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
	chainflags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
	ddi_put8(acc_hdl, &ieeesgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		ASSERT(mpt->m_req_frame_size >=
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
	}

	p = cmd->cmd_extra_frames;

	ddi_put32(acc_hdl, &ieeesgechain->Length, chainlength);
	ddi_put32(acc_hdl, &ieeesgechain->Address.Low, p->m_phys_addr);
	ddi_put32(acc_hdl, &ieeesgechain->Address.High, p->m_phys_addr >> 32);

	/*
	 * If there are two or more frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame.  Note that the value NextChainOffset should be
	 * in quad-words (16 bytes), hence the >> 4.
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	} else {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	ieeesge = (pMpi2IeeeSgeSimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
				/* j is re-examined for this cookie next pass */
				j--;
				chainflags =
				    MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
				ddi_put8(p->m_acc_hdl,
				    &ieeesgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				nframe_phys_addr = p->m_phys_addr +
				    (mpt->m_req_frame_size * k);
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.Low,
				    nframe_phys_addr);
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.High,
				    nframe_phys_addr >> 32);

				/*
				 * If there are two or more frames left
				 * we have to fill in the next chain
				 * offset to the location of the chain
				 * element in the next frame and fill in
				 * the length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64))
					    >> 4);
					ASSERT(mpt->m_req_frame_size >=
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    0);
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				ieeesge = (pMpi2IeeeSgeSimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Length, dmap->count);
			flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * do we need to do anything?
			 * if ((l == sgemax) && (k != frames)) {
			 * }
			 *
			 * NOTE(review): unlike the Gen1 (MPI2_SGE_*)
			 * path, IEEE SGEs have no LAST_ELEMENT flag,
			 * so nothing is needed here.
			 */

			/*
			 * If this is the final cookie set end of list.
			 */
			if (j == i) {
				flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
			}

			ddi_put8(p->m_acc_hdl, &ieeesge->Flags, flags);
			dmap++;
			ieeesge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
5033 5029
5034 5030 static void
5035 5031 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
5036 5032 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
5037 5033 {
5038 5034 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
5039 5035
5040 5036 NDBG1(("mptsas_sge_setup: cookiec=%d", cmd->cmd_cookiec));
5041 5037
5042 5038 /*
5043 5039 * Set read/write bit in control.
5044 5040 */
5045 5041 if (cmd->cmd_flags & CFLAG_DMASEND) {
5046 5042 *control |= MPI2_SCSIIO_CONTROL_WRITE;
5047 5043 } else {
5048 5044 *control |= MPI2_SCSIIO_CONTROL_READ;
5049 5045 }
5050 5046
5051 5047 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
5052 5048
5053 5049 /*
5054 5050 * We have 4 cases here. First where we can fit all the
5055 5051 * SG elements into the main frame, and the case
5056 5052 * where we can't. The SG element is also different when using
5057 5053 * MPI2.5 interface.
5058 5054 * If we have more cookies than we can attach to a frame
5059 5055 * we will need to use a chain element to point
5060 5056 * a location of memory where the rest of the S/G
5061 5057 * elements reside.
5062 5058 */
5063 5059 if (cmd->cmd_cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
5064 5060 if (mpt->m_MPI25) {
5065 5061 mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl,
5066 5062 cmd->cmd_cookiec,
5067 5063 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
5068 5064 } else {
5069 5065 mptsas_sge_mainframe(cmd, frame, acc_hdl,
5070 5066 cmd->cmd_cookiec,
5071 5067 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
5072 5068 | MPI2_SGE_FLAGS_END_OF_BUFFER
5073 5069 | MPI2_SGE_FLAGS_END_OF_LIST) <<
5074 5070 MPI2_SGE_FLAGS_SHIFT));
5075 5071 }
5076 5072 } else {
5077 5073 if (mpt->m_MPI25) {
5078 5074 mptsas_ieee_sge_chain(mpt, cmd, frame, acc_hdl);
5079 5075 } else {
5080 5076 mptsas_sge_chain(mpt, cmd, frame, acc_hdl);
5081 5077 }
5082 5078 }
5083 5079 }
5084 5080
5085 5081 /*
5086 5082 * Interrupt handling
5087 5083 * Utility routine. Poll for status of a command sent to HBA
5088 5084 * without interrupts (a FLAG_NOINTR command).
5089 5085 */
5090 5086 int
5091 5087 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
5092 5088 {
5093 5089 int rval = TRUE;
5094 5090
5095 5091 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
5096 5092
5097 5093 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
5098 5094 mptsas_restart_hba(mpt);
5099 5095 }
5100 5096
5101 5097 /*
5102 5098 * Wait, using drv_usecwait(), long enough for the command to
5103 5099 * reasonably return from the target if the target isn't
5104 5100 * "dead". A polled command may well be sent from scsi_poll, and
5105 5101 * there are retries built in to scsi_poll if the transport
5106 5102 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
5107 5103 * and retries the transport up to scsi_poll_busycnt times
5108 5104 * (currently 60) if
5109 5105 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
5110 5106 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
5111 5107 *
5112 5108 * limit the waiting to avoid a hang in the event that the
5113 5109 * cmd never gets started but we are still receiving interrupts
5114 5110 */
5115 5111 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
5116 5112 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
5117 5113 NDBG5(("mptsas_poll: command incomplete"));
5118 5114 rval = FALSE;
5119 5115 break;
5120 5116 }
5121 5117 }
5122 5118
5123 5119 if (rval == FALSE) {
5124 5120
5125 5121 /*
5126 5122 * this isn't supposed to happen, the hba must be wedged
5127 5123 * Mark this cmd as a timeout.
5128 5124 */
5129 5125 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
5130 5126 (STAT_TIMEOUT|STAT_ABORTED));
5131 5127
5132 5128 if (poll_cmd->cmd_queued == FALSE) {
5133 5129
5134 5130 NDBG5(("mptsas_poll: not on waitq"));
5135 5131
5136 5132 poll_cmd->cmd_pkt->pkt_state |=
5137 5133 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
5138 5134 } else {
5139 5135
5140 5136 /* find and remove it from the waitq */
5141 5137 NDBG5(("mptsas_poll: delete from waitq"));
5142 5138 mptsas_waitq_delete(mpt, poll_cmd);
5143 5139 }
5144 5140
5145 5141 }
5146 5142 mptsas_fma_check(mpt, poll_cmd);
5147 5143 NDBG5(("mptsas_poll: done"));
5148 5144 return (rval);
5149 5145 }
5150 5146
/*
 * Used for polling cmds and TM function
 *
 * With interrupts masked, spin on the reply post queue until a valid
 * reply descriptor shows up or the time budget runs out.  Returns TRUE
 * if a reply was found and processed, FALSE on timeout.
 */
static int
mptsas_wait_intr(mptsas_t *mpt, int polltime)
{
	int cnt;
	pMpi2ReplyDescriptorsUnion_t reply_desc_union;
	uint32_t int_mask;

	NDBG5(("mptsas_wait_intr"));

	/* Tell the regular ISR we are consuming replies by polling. */
	mpt->m_polled_intr = 1;

	/*
	 * Get the current interrupt mask and disable interrupts.  When
	 * re-enabling ints, set mask to saved value.
	 */
	int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Poll for up to polltime milliseconds: each pass that finds no
	 * reply waits 1000us before retrying.
	 */
	for (cnt = 0; cnt < polltime; cnt++) {
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		/* All-ones in either word means the slot is still empty. */
		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			drv_usecwait(1000);
			continue;
		}

		/*
		 * The reply is valid, process it according to its
		 * type.
		 */
		mptsas_process_intr(mpt, reply_desc_union);

		/* Post queue is circular; wrap the consumer index. */
		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}

		/*
		 * Update the global reply index
		 */
		ddi_put32(mpt->m_datap,
		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		mpt->m_polled_intr = 0;

		/*
		 * Re-enable interrupts and quit.
		 */
		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
		    int_mask);
		return (TRUE);

	}

	/*
	 * Timed out: clear polling flag, re-enable interrupts and quit.
	 */
	mpt->m_polled_intr = 0;
	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
	return (FALSE);
}
5223 5219
/*
 * Handle a SCSI IO success reply descriptor: validate the SMID, mark the
 * packet state as fully completed with no residual, and either wake a
 * passthru waiter or move the command to the done queue.
 */
static void
mptsas_handle_scsi_io_success(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
	uint16_t SMID;
	mptsas_slots_t *slots = mpt->m_active;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);

	/*
	 * This is a success reply so just complete the IO.  First, do a sanity
	 * check on the SMID.  The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_normal)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return;
	}

	/* Success path: all stages completed, full transfer, no residual. */
	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/*
	 * Passthru commands are completed via the cv, not the done queue,
	 * and stay in their slot until the waiter collects them.
	 */
	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_broadcast(&mpt->m_passthru_cv);
		return;
	} else {
		mptsas_remove_cmd(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5293 5289
5294 5290 static void
5295 5291 mptsas_handle_address_reply(mptsas_t *mpt,
5296 5292 pMpi2ReplyDescriptorsUnion_t reply_desc)
5297 5293 {
5298 5294 pMpi2AddressReplyDescriptor_t address_reply;
5299 5295 pMPI2DefaultReply_t reply;
5300 5296 mptsas_fw_diagnostic_buffer_t *pBuffer;
5301 5297 uint32_t reply_addr, reply_frame_dma_baseaddr;
5302 5298 uint16_t SMID, iocstatus;
5303 5299 mptsas_slots_t *slots = mpt->m_active;
5304 5300 mptsas_cmd_t *cmd = NULL;
5305 5301 uint8_t function, buffer_type;
5306 5302 m_replyh_arg_t *args;
5307 5303 int reply_frame_no;
5308 5304
5309 5305 ASSERT(mutex_owned(&mpt->m_mutex));
5310 5306
5311 5307 address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
5312 5308 reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
5313 5309 &address_reply->ReplyFrameAddress);
5314 5310 SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
5315 5311
5316 5312 /*
5317 5313 * If reply frame is not in the proper range we should ignore this
5318 5314 * message and exit the interrupt handler.
5319 5315 */
5320 5316 reply_frame_dma_baseaddr = mpt->m_reply_frame_dma_addr & 0xffffffffu;
5321 5317 if ((reply_addr < reply_frame_dma_baseaddr) ||
5322 5318 (reply_addr >= (reply_frame_dma_baseaddr +
5323 5319 (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
5324 5320 ((reply_addr - reply_frame_dma_baseaddr) %
5325 5321 mpt->m_reply_frame_size != 0)) {
5326 5322 mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
5327 5323 "address 0x%x\n", reply_addr);
5328 5324 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5329 5325 return;
5330 5326 }
5331 5327
5332 5328 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
5333 5329 DDI_DMA_SYNC_FORCPU);
5334 5330 reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
5335 5331 reply_frame_dma_baseaddr));
5336 5332 function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
5337 5333
5338 5334 NDBG31(("mptsas_handle_address_reply: function 0x%x, reply_addr=0x%x",
5339 5335 function, reply_addr));
5340 5336
5341 5337 /*
5342 5338 * don't get slot information and command for events since these values
5343 5339 * don't exist
5344 5340 */
5345 5341 if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
5346 5342 (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
5347 5343 /*
5348 5344 * This could be a TM reply, which use the last allocated SMID,
5349 5345 * so allow for that.
5350 5346 */
5351 5347 if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
5352 5348 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
5353 5349 "%d\n", SMID);
5354 5350 ddi_fm_service_impact(mpt->m_dip,
5355 5351 DDI_SERVICE_UNAFFECTED);
5356 5352 return;
5357 5353 }
5358 5354
5359 5355 cmd = slots->m_slot[SMID];
5360 5356
5361 5357 /*
5362 5358 * print warning and return if the slot is empty
5363 5359 */
5364 5360 if (cmd == NULL) {
5365 5361 mptsas_log(mpt, CE_WARN, "?NULL command for address "
5366 5362 "reply in slot %d", SMID);
5367 5363 return;
5368 5364 }
5369 5365 if ((cmd->cmd_flags &
5370 5366 (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
5371 5367 cmd->cmd_rfm = reply_addr;
5372 5368 cmd->cmd_flags |= CFLAG_FINISHED;
5373 5369 cv_broadcast(&mpt->m_passthru_cv);
5374 5370 cv_broadcast(&mpt->m_config_cv);
5375 5371 cv_broadcast(&mpt->m_fw_diag_cv);
5376 5372 return;
5377 5373 } else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
5378 5374 mptsas_remove_cmd(mpt, cmd);
5379 5375 }
5380 5376 NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
5381 5377 }
5382 5378 /*
5383 5379 * Depending on the function, we need to handle
5384 5380 * the reply frame (and cmd) differently.
5385 5381 */
5386 5382 switch (function) {
5387 5383 case MPI2_FUNCTION_SCSI_IO_REQUEST:
5388 5384 mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
5389 5385 break;
5390 5386 case MPI2_FUNCTION_SCSI_TASK_MGMT:
5391 5387 cmd->cmd_rfm = reply_addr;
5392 5388 mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
5393 5389 cmd);
5394 5390 break;
5395 5391 case MPI2_FUNCTION_FW_DOWNLOAD:
5396 5392 cmd->cmd_flags |= CFLAG_FINISHED;
5397 5393 cv_signal(&mpt->m_fw_cv);
5398 5394 break;
5399 5395 case MPI2_FUNCTION_EVENT_NOTIFICATION:
5400 5396 reply_frame_no = (reply_addr - reply_frame_dma_baseaddr) /
5401 5397 mpt->m_reply_frame_size;
5402 5398 args = &mpt->m_replyh_args[reply_frame_no];
5403 5399 args->mpt = (void *)mpt;
5404 5400 args->rfm = reply_addr;
5405 5401
5406 5402 /*
5407 5403 * Record the event if its type is enabled in
5408 5404 * this mpt instance by ioctl.
5409 5405 */
5410 5406 mptsas_record_event(args);
5411 5407
5412 5408 /*
5413 5409 * Handle time critical events
5414 5410 * NOT_RESPONDING/ADDED only now
5415 5411 */
5416 5412 if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
5417 5413 /*
5418 5414 * Would not return main process,
5419 5415 * just let taskq resolve ack action
5420 5416 * and ack would be sent in taskq thread
5421 5417 */
5422 5418 NDBG20(("send mptsas_handle_event_sync success"));
5423 5419 }
5424 5420
5425 5421 if (mpt->m_in_reset) {
5426 5422 NDBG20(("dropping event received during reset"));
5427 5423 return;
5428 5424 }
5429 5425
5430 5426 if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
5431 5427 (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
5432 5428 mptsas_log(mpt, CE_WARN, "No memory available"
5433 5429 "for dispatch taskq");
5434 5430 /*
5435 5431 * Return the reply frame to the free queue.
5436 5432 */
5437 5433 ddi_put32(mpt->m_acc_free_queue_hdl,
5438 5434 &((uint32_t *)(void *)
5439 5435 mpt->m_free_queue)[mpt->m_free_index], reply_addr);
5440 5436 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5441 5437 DDI_DMA_SYNC_FORDEV);
5442 5438 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5443 5439 mpt->m_free_index = 0;
5444 5440 }
5445 5441
5446 5442 ddi_put32(mpt->m_datap,
5447 5443 &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
5448 5444 }
5449 5445 return;
5450 5446 case MPI2_FUNCTION_DIAG_BUFFER_POST:
5451 5447 /*
5452 5448 * If SMID is 0, this implies that the reply is due to a
5453 5449 * release function with a status that the buffer has been
5454 5450 * released. Set the buffer flags accordingly.
5455 5451 */
5456 5452 if (SMID == 0) {
5457 5453 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
5458 5454 &reply->IOCStatus);
5459 5455 buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
5460 5456 &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
5461 5457 if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
5462 5458 pBuffer =
5463 5459 &mpt->m_fw_diag_buffer_list[buffer_type];
5464 5460 pBuffer->valid_data = TRUE;
5465 5461 pBuffer->owned_by_firmware = FALSE;
5466 5462 pBuffer->immediate = FALSE;
5467 5463 }
5468 5464 } else {
5469 5465 /*
5470 5466 * Normal handling of diag post reply with SMID.
5471 5467 */
5472 5468 cmd = slots->m_slot[SMID];
5473 5469
5474 5470 /*
5475 5471 * print warning and return if the slot is empty
5476 5472 */
5477 5473 if (cmd == NULL) {
5478 5474 mptsas_log(mpt, CE_WARN, "?NULL command for "
5479 5475 "address reply in slot %d", SMID);
5480 5476 return;
5481 5477 }
5482 5478 cmd->cmd_rfm = reply_addr;
5483 5479 cmd->cmd_flags |= CFLAG_FINISHED;
5484 5480 cv_broadcast(&mpt->m_fw_diag_cv);
5485 5481 }
5486 5482 return;
5487 5483 default:
5488 5484 mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
5489 5485 break;
5490 5486 }
5491 5487
5492 5488 /*
5493 5489 * Return the reply frame to the free queue.
5494 5490 */
5495 5491 ddi_put32(mpt->m_acc_free_queue_hdl,
5496 5492 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
5497 5493 reply_addr);
5498 5494 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5499 5495 DDI_DMA_SYNC_FORDEV);
5500 5496 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5501 5497 mpt->m_free_index = 0;
5502 5498 }
5503 5499 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
5504 5500 mpt->m_free_index);
5505 5501
5506 5502 if (cmd->cmd_flags & CFLAG_FW_CMD)
5507 5503 return;
5508 5504
5509 5505 if (cmd->cmd_flags & CFLAG_RETRY) {
5510 5506 /*
5511 5507 * The target returned QFULL or busy, do not add this
5512 5508 * pkt to the doneq since the hba will retry
5513 5509 * this cmd.
5514 5510 *
5515 5511 * The pkt has already been resubmitted in
5516 5512 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
5517 5513 * Remove this cmd_flag here.
5518 5514 */
5519 5515 cmd->cmd_flags &= ~CFLAG_RETRY;
5520 5516 } else {
5521 5517 mptsas_doneq_add(mpt, cmd);
5522 5518 }
5523 5519 }
5524 5520
5525 5521 #ifdef MPTSAS_DEBUG
5526 5522 static uint8_t mptsas_last_sense[256];
5527 5523 #endif
5528 5524
/*
 * Decode the IOC's SCSI IO reply frame for a completed command and translate
 * it into the scsi_pkt fields (pkt_reason, pkt_state, pkt_resid, pkt_scbp and
 * auto-request-sense data) that target drivers consume.  Side effects include
 * target throttle changes, command retry via mptsas_accept_pkt(), and
 * dispatching a dynamic-reconfigure task when sense data indicates a LUN
 * change.
 */
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
	uint8_t			scsi_status, scsi_state;
	uint16_t		ioc_status, cmd_rqs_len;
	uint32_t		xferred, sensecount, responsedata, loginfo = 0;
	struct scsi_pkt		*pkt;
	struct scsi_arq_status	*arqstat;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint8_t			*sensedata = NULL;
	uint64_t		sas_wwn;
	uint8_t			phy;
	char			wwn_str[MPTSAS_WWN_STRLEN];

	/* Pull all fields of interest out of the DMA-mapped reply frame. */
	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
	    &reply->ResponseInfo);

	/*
	 * If the firmware attached log info, report it along with the
	 * target's WWN (or phy number for directly attached devices with
	 * no WWN).
	 */
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		sas_wwn = ptgt->m_addr.mta_wwn;
		phy = ptgt->m_phynum;
		if (sas_wwn == 0) {
			(void) sprintf(wwn_str, "p%x", phy);
		} else {
			(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
		}
		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		mptsas_log(mpt, CE_NOTE,
		    "?Log info 0x%x received for target %d %s.\n"
		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
		    loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
		    scsi_state);
	}

	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
	    scsi_status, ioc_status, scsi_state));

	pkt = CMD2PKT(cmd);
	*(pkt->pkt_scbp) = scsi_status;

	if (loginfo == 0x31170000) {
		/*
		 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
		 * 0x31170000 comes, that means the device missing delay
		 * is in progressing, the command need retry later.
		 */
		*(pkt->pkt_scbp) = STATUS_BUSY;
		return;
	}

	/*
	 * No SCSI status plus DEVICE_NOT_THERE: the device is gone; mark
	 * the command incomplete and drain the target's queue.
	 */
	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state |= STATE_GOT_BUS;
		if (ptgt->m_reset_delay == 0) {
			mptsas_set_throttle(mpt, ptgt,
			    DRAIN_THROTTLE);
		}
		return;
	}

	/*
	 * Only the low byte of ResponseInfo carries the SCSI response code;
	 * a TLR_OFF response code is not supported and fails the command.
	 */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		responsedata &= 0x000000FF;
		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
			mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
			pkt->pkt_reason = CMD_TLR_OFF;
			return;
		}
	}


	switch (scsi_status) {
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		/*
		 * Build the auto-request-sense (ARQ) status in the packet's
		 * status block and copy the sense bytes out of the per-command
		 * request-sense DMA buffer.
		 */
		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
		arqstat = (void*)(pkt->pkt_scbp);
		arqstat->sts_rqpkt_status = *((struct scsi_status *)
		    (pkt->pkt_scbp));
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
		if (cmd->cmd_flags & CFLAG_XARQ) {
			pkt->pkt_state |= STATE_XARQ_DONE;
		}
		if (pkt->pkt_resid != cmd->cmd_dmacount) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
		arqstat->sts_rqpkt_state = pkt->pkt_state;
		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
		sensedata = (uint8_t *)&arqstat->sts_sensedata;
		/* Extended request-sense buffer takes precedence if set. */
		cmd_rqs_len = cmd->cmd_extrqslen ?
		    cmd->cmd_extrqslen : cmd->cmd_rqslen;
		(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
		    DDI_DMA_SYNC_FORKERNEL);
#ifdef MPTSAS_DEBUG
		/* Keep a copy of the most recent sense data for debugging. */
		bcopy(cmd->cmd_arq_buf, mptsas_last_sense,
		    ((cmd_rqs_len >= sizeof (mptsas_last_sense)) ?
		    sizeof (mptsas_last_sense):cmd_rqs_len));
#endif
		/* Copy no more than the smaller of buffer size and count. */
		bcopy((uchar_t *)cmd->cmd_arq_buf, sensedata,
		    ((cmd_rqs_len >= sensecount) ? sensecount :
		    cmd_rqs_len));
		arqstat->sts_rqpkt_resid = (cmd_rqs_len - sensecount);
		cmd->cmd_flags |= CFLAG_CMDARQ;
		/*
		 * Set proper status for pkt if autosense was valid
		 */
		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			struct scsi_status zero_status = { 0 };
			arqstat->sts_rqpkt_status = zero_status;
		}

		/*
		 * ASC=0x47 is parity error
		 * ASC=0x48 is initiator detected error received
		 */
		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
		    ((scsi_sense_asc(sensedata) == 0x47) ||
		    (scsi_sense_asc(sensedata) == 0x48))) {
			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
		}

		/*
		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
		 * ASC/ASCQ=0x25/0x00 means invalid lun
		 * Either way the LUN layout changed; dispatch a target
		 * reconfigure to the DR taskq.
		 */
		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
		    (scsi_sense_asc(sensedata) == 0x3F) &&
		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
		    (scsi_sense_asc(sensedata) == 0x25) &&
		    (scsi_sense_ascq(sensedata) == 0x00))) {
			mptsas_topo_change_list_t *topo_node = NULL;

			topo_node = kmem_zalloc(
			    sizeof (mptsas_topo_change_list_t),
			    KM_NOSLEEP);
			if (topo_node == NULL) {
				mptsas_log(mpt, CE_NOTE, "No memory"
				    "resource for handle SAS dynamic"
				    "reconfigure.\n");
				break;
			}
			topo_node->mpt = mpt;
			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
			topo_node->un.phymask = ptgt->m_addr.mta_phymask;
			topo_node->devhdl = ptgt->m_devhdl;
			topo_node->object = (void *)ptgt;
			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr,
			    (void *)topo_node,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				kmem_free(topo_node,
				    sizeof (mptsas_topo_change_list_t));
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
				    "for handle SAS dynamic reconfigure"
				    "failed. \n");
			}
		}
		break;
	case MPI2_SCSI_STATUS_GOOD:
		/* SCSI status is good; the IOC status decides the outcome. */
		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state |= STATE_GOT_BUS;
			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			NDBG31(("lost disk for target%d, command:%x",
			    Tgt(cmd), pkt->pkt_cdbp[0]));
			break;
		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
			NDBG31(("data overrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_reason = CMD_DATA_OVR;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS
			    | STATE_XFERRED_DATA);
			pkt->pkt_resid = 0;
			break;
		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
			NDBG31(("data underrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS);
			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
			if (pkt->pkt_resid != cmd->cmd_dmacount) {
				pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			break;
		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
			if (cmd->cmd_active_expiration <= gethrtime()) {
				/*
				 * When timeout requested, propagate
				 * proper reason and statistics to
				 * target drivers.
				 */
				mptsas_set_pkt_reason(mpt, cmd, CMD_TIMEOUT,
				    STAT_BUS_RESET | STAT_TIMEOUT);
			} else {
				mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
				    STAT_BUS_RESET);
			}
			break;
		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_DEV_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_TERMINATED, STAT_TERMINATED);
			break;
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		case MPI2_IOCSTATUS_BUSY:
			/*
			 * set throttles to drain
			 * NOTE: this reuses ptgt to walk every target, so
			 * ptgt no longer refers to this command's target
			 * below this point.
			 */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/*
			 * retry command
			 */
			cmd->cmd_flags |= CFLAG_RETRY;
			cmd->cmd_pkt_flags |= FLAG_HEAD;

			(void) mptsas_accept_pkt(mpt, cmd);
			break;
		default:
			mptsas_log(mpt, CE_WARN,
			    "unknown ioc_status = %x\n", ioc_status);
			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
			    "count = %x, scsi_status = %x", scsi_state,
			    xferred, scsi_status);
			break;
		}
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		/* Queue-full: retry/throttle policy lives in the handler. */
		mptsas_handle_qfull(mpt, cmd);
		break;
	case MPI2_SCSI_STATUS_BUSY:
		NDBG31(("scsi_status busy received"));
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		NDBG31(("scsi_status reservation conflict received"));
		break;
	default:
		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
		    scsi_status, ioc_status);
		mptsas_log(mpt, CE_WARN,
		    "mptsas_process_intr: invalid scsi status\n");
		break;
	}
}
5798 5794
5799 5795 static void
5800 5796 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5801 5797 mptsas_cmd_t *cmd)
5802 5798 {
5803 5799 uint8_t task_type;
5804 5800 uint16_t ioc_status;
5805 5801 uint32_t log_info;
5806 5802 uint16_t dev_handle;
5807 5803 struct scsi_pkt *pkt = CMD2PKT(cmd);
5808 5804
5809 5805 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5810 5806 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5811 5807 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5812 5808 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5813 5809
5814 5810 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5815 5811 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5816 5812 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5817 5813 task_type, ioc_status, log_info, dev_handle);
5818 5814 pkt->pkt_reason = CMD_INCOMPLETE;
5819 5815 return;
5820 5816 }
5821 5817
5822 5818 switch (task_type) {
5823 5819 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5824 5820 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5825 5821 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5826 5822 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5827 5823 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5828 5824 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5829 5825 break;
5830 5826 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5831 5827 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5832 5828 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5833 5829 /*
5834 5830 * Check for invalid DevHandle of 0 in case application
5835 5831 * sends bad command. DevHandle of 0 could cause problems.
5836 5832 */
5837 5833 if (dev_handle == 0) {
5838 5834 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5839 5835 " DevHandle of 0.");
5840 5836 } else {
5841 5837 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5842 5838 task_type);
5843 5839 }
5844 5840 break;
5845 5841 default:
5846 5842 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5847 5843 task_type);
5848 5844 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5849 5845 break;
5850 5846 }
5851 5847 }
5852 5848
/*
 * Body of a done-queue helper thread.  Sleeps on its per-thread cv until
 * work arrives, then drains completed commands from its queue and runs
 * their completion callbacks with the per-thread mutex dropped.  Exits
 * when the ACTIVE flag is cleared, decrementing the thread count and
 * waking anyone waiting for the helpers to terminate.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t			*mpt = arg->mpt;
	uint64_t			t = arg->t;
	mptsas_cmd_t			*cmd;
	struct scsi_pkt			*pkt;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		/* Block until a command is queued (or we are shut down). */
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * Run the completion callback without holding the item
		 * mutex; the callback may take arbitrary time/locks.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Announce this helper's exit to the thread-teardown waiter. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5884 5880
5885 5881
5886 5882 /*
5887 5883 * mpt interrupt handler.
5888 5884 */
/*
 * Primary interrupt handler.  Claims the interrupt only if the chip is
 * powered up, not being polled, and actually has replies pending; then
 * drains the reply post queue, processes each descriptor, updates the
 * host reply index, and completes finished commands either inline or via
 * the helper threads depending on done-queue depth.
 *
 * Returns DDI_INTR_CLAIMED if any work was done, DDI_INTR_UNCLAIMED
 * otherwise (shared-interrupt etiquette).
 */
static uint_t
mptsas_intr(caddr_t arg1, caddr_t arg2)
{
	mptsas_t			*mpt = (void *)arg1;
	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
	uchar_t				did_reply = FALSE;

	NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));

	mutex_enter(&mpt->m_mutex);

	/*
	 * If interrupts are shared by two channels then check whether this
	 * interrupt is genuinely for this channel by making sure first the
	 * chip is in high power state.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * If polling, interrupt was triggered by some shared interrupt because
	 * IOC interrupts are disabled during polling, so polling routine will
	 * handle any replies. Considering this, if polling is happening,
	 * return with interrupt unclaimed.
	 */
	if (mpt->m_polled_intr) {
		mutex_exit(&mpt->m_mutex);
		mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * Read the istat register.
	 */
	if ((INTPENDING(mpt)) != 0) {
		/*
		 * read fifo until empty.
		 */
#ifndef __lock_lint
		_NOTE(CONSTCOND)
#endif
		while (TRUE) {
			/* Make the freshly DMA'd descriptor visible to us. */
			(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
			    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

			/*
			 * All-ones in either word marks an unused (already
			 * consumed) descriptor: the queue is drained.
			 */
			if (ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
			    ddi_get32(mpt->m_acc_post_queue_hdl,
			    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
				break;
			}

			/*
			 * The reply is valid, process it according to its
			 * type. Also, set a flag for updating the reply index
			 * after they've all been processed.
			 */
			did_reply = TRUE;

			mptsas_process_intr(mpt, reply_desc_union);

			/*
			 * Increment post index and roll over if needed.
			 */
			if (++mpt->m_post_index == mpt->m_post_queue_depth) {
				mpt->m_post_index = 0;
			}
		}

		/*
		 * Update the global reply index if at least one reply was
		 * processed.
		 */
		if (did_reply) {
			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
		return (DDI_INTR_UNCLAIMED);
	}
	NDBG1(("mptsas_intr complete"));

	/*
	 * If no helper threads are created, process the doneq in ISR. If
	 * helpers are created, use the doneq length as a metric to measure the
	 * load on the interrupt CPU. If it is long enough, which indicates the
	 * load is heavy, then we deliver the IO completions to the helpers.
	 * This measurement has some limitations, although it is simple and
	 * straightforward and works well for most of the cases at present.
	 */
	if (!mpt->m_doneq_thread_n ||
	    (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
		mptsas_doneq_empty(mpt);
	} else {
		mptsas_deliver_doneq_thread(mpt);
	}

	/*
	 * If there are queued cmd, start them now.
	 */
	if (mpt->m_waitq != NULL) {
		mptsas_restart_waitq(mpt);
	}

	mutex_exit(&mpt->m_mutex);
	return (DDI_INTR_CLAIMED);
}
6002 5998
6003 5999 static void
6004 6000 mptsas_process_intr(mptsas_t *mpt,
6005 6001 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
6006 6002 {
6007 6003 uint8_t reply_type;
6008 6004
6009 6005 ASSERT(mutex_owned(&mpt->m_mutex));
6010 6006
6011 6007 /*
6012 6008 * The reply is valid, process it according to its
6013 6009 * type. Also, set a flag for updated the reply index
6014 6010 * after they've all been processed.
6015 6011 */
6016 6012 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
6017 6013 &reply_desc_union->Default.ReplyFlags);
6018 6014 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
6019 6015 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
6020 6016 reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
6021 6017 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
6022 6018 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
6023 6019 mptsas_handle_address_reply(mpt, reply_desc_union);
6024 6020 } else {
6025 6021 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
6026 6022 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
6027 6023 }
6028 6024
6029 6025 /*
6030 6026 * Clear the reply descriptor for re-use and increment
6031 6027 * index.
6032 6028 */
6033 6029 ddi_put64(mpt->m_acc_post_queue_hdl,
6034 6030 &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
6035 6031 0xFFFFFFFFFFFFFFFF);
6036 6032 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
6037 6033 DDI_DMA_SYNC_FORDEV);
6038 6034 }
6039 6035
6040 6036 /*
6041 6037 * handle qfull condition
6042 6038 */
/*
 * handle qfull condition
 *
 * Either give up and let the target driver's own QFULL handling kick in
 * (retries exhausted or disabled), or retry the command at the head of
 * the queue with the target's throttle reduced.
 */
static void
mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	/* NB: the ++ both counts this QFULL and tests the retry budget. */
	if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
	    (ptgt->m_qfull_retries == 0)) {
		/*
		 * We have exhausted the retries on QFULL, or,
		 * the target driver has indicated that it
		 * wants to handle QFULL itself by setting
		 * qfull-retries capability to 0. In either case
		 * we want the target driver's QFULL handling
		 * to kick in. We do this by having pkt_reason
		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
		 */
		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
	} else {
		/* Back the throttle off to current outstanding minus two. */
		if (ptgt->m_reset_delay == 0) {
			ptgt->m_t_throttle =
			    max((ptgt->m_t_ncmds - 2), 0);
		}

		cmd->cmd_pkt_flags |= FLAG_HEAD;
		cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
		cmd->cmd_flags |= CFLAG_RETRY;

		(void) mptsas_accept_pkt(mpt, cmd);

		/*
		 * when target gives queue full status with no commands
		 * outstanding (m_t_ncmds == 0), throttle is set to 0
		 * (HOLD_THROTTLE), and the queue full handling start
		 * (see psarc/1994/313); if there are commands outstanding,
		 * throttle is set to (m_t_ncmds - 2)
		 */
		if (ptgt->m_t_throttle == HOLD_THROTTLE) {
			/*
			 * By setting throttle to QFULL_THROTTLE, we
			 * avoid submitting new commands and in
			 * mptsas_restart_cmd find out slots which need
			 * their throttles to be cleared.
			 */
			mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
			if (mpt->m_restart_cmd_timeid == 0) {
				mpt->m_restart_cmd_timeid =
				    timeout(mptsas_restart_cmd, mpt,
				    ptgt->m_qfull_retry_interval);
			}
		}
	}
}
6095 6091
6096 6092 mptsas_phymask_t
6097 6093 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
6098 6094 {
6099 6095 mptsas_phymask_t phy_mask = 0;
6100 6096 uint8_t i = 0;
6101 6097
6102 6098 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
6103 6099
6104 6100 ASSERT(mutex_owned(&mpt->m_mutex));
6105 6101
6106 6102 /*
6107 6103 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
6108 6104 */
6109 6105 if (physport == 0xFF) {
6110 6106 return (0);
6111 6107 }
6112 6108
6113 6109 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
6114 6110 if (mpt->m_phy_info[i].attached_devhdl &&
6115 6111 (mpt->m_phy_info[i].phy_mask != 0) &&
6116 6112 (mpt->m_phy_info[i].port_num == physport)) {
6117 6113 phy_mask = mpt->m_phy_info[i].phy_mask;
6118 6114 break;
6119 6115 }
6120 6116 }
6121 6117 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
6122 6118 mpt->m_instance, physport, phy_mask));
6123 6119 return (phy_mask);
6124 6120 }
6125 6121
6126 6122 /*
6127 6123 * mpt free device handle after device gone, by use of passthrough
6128 6124 */
6129 6125 static int
6130 6126 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
6131 6127 {
6132 6128 Mpi2SasIoUnitControlRequest_t req;
6133 6129 Mpi2SasIoUnitControlReply_t rep;
6134 6130 int ret;
6135 6131
6136 6132 ASSERT(mutex_owned(&mpt->m_mutex));
6137 6133
6138 6134 /*
6139 6135 * Need to compose a SAS IO Unit Control request message
6140 6136 * and call mptsas_do_passthru() function
6141 6137 */
6142 6138 bzero(&req, sizeof (req));
6143 6139 bzero(&rep, sizeof (rep));
6144 6140
6145 6141 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
6146 6142 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
6147 6143 req.DevHandle = LE_16(devhdl);
6148 6144
6149 6145 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
6150 6146 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
6151 6147 if (ret != 0) {
6152 6148 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6153 6149 "Control error %d", ret);
6154 6150 return (DDI_FAILURE);
6155 6151 }
6156 6152
6157 6153 /* do passthrough success, check the ioc status */
6158 6154 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
6159 6155 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6160 6156 "Control IOCStatus %d", LE_16(rep.IOCStatus));
6161 6157 return (DDI_FAILURE);
6162 6158 }
6163 6159
6164 6160 return (DDI_SUCCESS);
6165 6161 }
6166 6162
6167 6163 /*
6168 6164 * We have a SATA target that has changed, which means the "bridge-port"
6169 6165 * property must be updated to reflect the SAS WWN of the new attachment point.
6170 6166 * This may change if a SATA device changes which bay, and therefore phy, it is
6171 6167 * plugged into. This SATA device may be a multipath virtual device or may be a
6172 6168 * physical device. We have to handle both cases.
6173 6169 */
static boolean_t
mptsas_update_sata_bridge(mptsas_t *mpt, dev_info_t *parent,
    mptsas_target_t *ptgt)
{
	int			rval;
	uint16_t		dev_hdl;
	uint16_t		pdev_hdl;
	uint64_t		dev_sas_wwn;
	uint8_t			physport;
	uint8_t			phy_id;
	uint32_t		page_address;
	uint16_t		bay_num, enclosure, io_flags;
	uint32_t		dev_info;
	char			uabuf[SCSI_WWN_BUFLEN];
	dev_info_t		*dip;
	mdi_pathinfo_t		*pip;

	/*
	 * Fetch SAS device page 0 for this target's handle under m_mutex
	 * to learn the WWN of the current attachment point.
	 */
	mutex_enter(&mpt->m_mutex);
	page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
	    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)ptgt->m_devhdl;
	rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
	    &dev_sas_wwn, &dev_info, &physport, &phy_id, &pdev_hdl, &bay_num,
	    &enclosure, &io_flags);
	mutex_exit(&mpt->m_mutex);
	if (rval != DDI_SUCCESS) {
		/*
		 * NOTE(review): this prints page_address (form bits OR'd
		 * with the handle) while claiming to print a handle —
		 * confirm whether ptgt->m_devhdl was intended.
		 */
		mptsas_log(mpt, CE_WARN, "unable to get SAS page 0 for "
		    "handle %d", page_address);
		return (B_FALSE);
	}

	if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas unable to format SATA bridge WWN");
		return (B_FALSE);
	}

	/* mpxio case: update the property on the pathinfo node. */
	if (mpt->m_mpxio_enable == TRUE && (pip = mptsas_find_path_addr(parent,
	    ptgt->m_addr.mta_wwn, 0)) != NULL) {
		if (mdi_prop_update_string(pip, SCSI_ADDR_PROP_BRIDGE_PORT,
		    uabuf) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to create SCSI bridge port "
			    "property for SATA device");
			return (B_FALSE);
		}
		return (B_TRUE);
	}

	/* Physical (non-mpxio) case: update the property on the devinfo. */
	if ((dip = mptsas_find_child_addr(parent, ptgt->m_addr.mta_wwn,
	    0)) != NULL) {
		if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to create SCSI bridge port "
			    "property for SATA device");
			return (B_FALSE);
		}
		return (B_TRUE);
	}

	/* Neither a path nor a child node was found for this WWN. */
	mptsas_log(mpt, CE_WARN, "mptsas failed to find dev_info_t or "
	    "mdi_pathinfo_t for target with WWN %016" PRIx64,
	    ptgt->m_addr.mta_wwn);

	return (B_FALSE);
}
6240 6236
6241 6237 static void
6242 6238 mptsas_update_phymask(mptsas_t *mpt)
6243 6239 {
6244 6240 mptsas_phymask_t mask = 0, phy_mask;
6245 6241 char *phy_mask_name;
6246 6242 uint8_t current_port;
6247 6243 int i, j;
6248 6244
6249 6245 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
6250 6246
6251 6247 ASSERT(mutex_owned(&mpt->m_mutex));
6252 6248
6253 6249 (void) mptsas_get_sas_io_unit_page(mpt);
6254 6250
6255 6251 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6256 6252
6257 6253 for (i = 0; i < mpt->m_num_phys; i++) {
6258 6254 phy_mask = 0x00;
6259 6255
6260 6256 if (mpt->m_phy_info[i].attached_devhdl == 0)
6261 6257 continue;
6262 6258
6263 6259 bzero(phy_mask_name, sizeof (phy_mask_name));
6264 6260
6265 6261 current_port = mpt->m_phy_info[i].port_num;
6266 6262
6267 6263 if ((mask & (1 << i)) != 0)
6268 6264 continue;
6269 6265
6270 6266 for (j = 0; j < mpt->m_num_phys; j++) {
6271 6267 if (mpt->m_phy_info[j].attached_devhdl &&
6272 6268 (mpt->m_phy_info[j].port_num == current_port)) {
6273 6269 phy_mask |= (1 << j);
6274 6270 }
6275 6271 }
6276 6272 mask = mask | phy_mask;
6277 6273
6278 6274 for (j = 0; j < mpt->m_num_phys; j++) {
6279 6275 if ((phy_mask >> j) & 0x01) {
6280 6276 mpt->m_phy_info[j].phy_mask = phy_mask;
6281 6277 }
6282 6278 }
6283 6279
6284 6280 (void) sprintf(phy_mask_name, "%x", phy_mask);
6285 6281
6286 6282 mutex_exit(&mpt->m_mutex);
6287 6283 /*
6288 6284 * register a iport, if the port has already been existed
6289 6285 * SCSA will do nothing and just return.
6290 6286 */
6291 6287 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
6292 6288 mutex_enter(&mpt->m_mutex);
6293 6289 }
6294 6290 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6295 6291 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
6296 6292 }
6297 6293
/*
 * mptsas_handle_dr is a task handler for DR, the DR action includes:
 * 1. Directly attached Device Added/Removed.
 * 2. Expander Device Added/Removed.
 * 3. Indirectly Attached Device Added/Expander.
 * 4. LUNs of a existing device status change.
 * 5. RAID volume created/deleted.
 * 6. Member of RAID volume is released because of RAID deletion.
 * 7. Physical disks are removed because of RAID creation.
 *
 * The argument is the head of a singly-linked list of
 * mptsas_topo_change_list_t nodes; every node is processed and freed in
 * turn.  For "add"-type events the initiator port set may first be
 * refreshed (mptsas_update_phymask) before any node is handled.
 */
static void
mptsas_handle_dr(void *args)
{
	mptsas_topo_change_list_t	*topo_node = NULL;
	mptsas_topo_change_list_t	*save_node = NULL;
	mptsas_t			*mpt;
	dev_info_t			*parent = NULL;
	mptsas_phymask_t		phymask = 0;
	char				*phy_mask_name;
	uint8_t				flags = 0, physport = 0xff;
	uint8_t				port_update = 0;
	uint_t				event;

	topo_node = (mptsas_topo_change_list_t *)args;

	mpt = topo_node->mpt;
	event = topo_node->event;
	flags = topo_node->flags;

	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);

	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));

	/*
	 * Decide, from the head node only, whether this batch of changes
	 * can alter the initiator port configuration.
	 */
	switch (event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Direct attached or expander attached device added
			 * into system or a Phys Disk that is being unhidden.
			 */
			port_update = 1;
		}
		break;
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
		/*
		 * New expander added into system, it must be the head
		 * of topo_change_list_t
		 */
		port_update = 1;
		break;
	default:
		port_update = 0;
		break;
	}
	/*
	 * All cases port_update == 1 may cause initiator port form change
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_port_chng && port_update) {
		/*
		 * mpt->m_port_chng flag indicates some PHYs of initiator
		 * port have changed to online. So when expander added or
		 * directly attached device online event come, we force to
		 * update port information by issuing SAS IO Unit Page and
		 * update PHYMASKs.
		 */
		(void) mptsas_update_phymask(mpt);
		mpt->m_port_chng = 0;

	}
	mutex_exit(&mpt->m_mutex);

	/* Consume the list; each iteration frees the node it handled. */
	while (topo_node) {
		phymask = 0;
		/*
		 * parent carries over between iterations for
		 * expander-associated nodes (they share one iport); it is
		 * NULL whenever we must (re)locate the iport dip.
		 */
		if (parent == NULL) {
			physport = topo_node->un.physport;
			event = topo_node->event;
			flags = topo_node->flags;
			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
				/*
				 * For all offline events, phymask is known
				 */
				phymask = topo_node->un.phymask;
				goto find_parent;
			}
			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
				/*
				 * NOTE: REMOVE_HANDLE is carried in the event
				 * field here (see the matching case in
				 * mptsas_handle_topo_change); no iport lookup
				 * is needed for it.
				 */
				goto handle_topo_change;
			}
			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
				phymask = topo_node->un.phymask;
				goto find_parent;
			}

			if ((flags ==
			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
				/*
				 * There is no any field in IR_CONFIG_CHANGE
				 * event indicate physport/phynum, let's get
				 * parent after SAS Device Page0 request.
				 */
				goto handle_topo_change;
			}

			mutex_enter(&mpt->m_mutex);
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				/*
				 * If the direct attached device added or a
				 * phys disk is being unhidden, argument
				 * physport actually is PHY#, so we have to get
				 * phymask according PHY#.
				 */
				physport = mpt->m_phy_info[physport].port_num;
			}

			/*
			 * Translate physport to phymask so that we can search
			 * parent dip.
			 */
			phymask = mptsas_physport_to_phymask(mpt,
			    physport);
			mutex_exit(&mpt->m_mutex);

find_parent:
			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
			/*
			 * For RAID topology change node, write the iport name
			 * as v0.
			 */
			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
				(void) sprintf(phy_mask_name, "v0");
			} else {
				/*
				 * phymask can be 0 if the drive has been
				 * pulled by the time an add event is
				 * processed. If phymask is 0, just skip this
				 * event and continue.
				 */
				if (phymask == 0) {
					mutex_enter(&mpt->m_mutex);
					save_node = topo_node;
					topo_node = topo_node->next;
					ASSERT(save_node);
					kmem_free(save_node,
					    sizeof (mptsas_topo_change_list_t));
					mutex_exit(&mpt->m_mutex);

					parent = NULL;
					continue;
				}
				(void) sprintf(phy_mask_name, "%x", phymask);
			}
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find an "
				    "iport, should not happen!");
				/*
				 * NOTE(review): bailing out here leaks any
				 * remaining list nodes — presumably this
				 * "should not happen" path was deemed
				 * acceptable; confirm before changing.
				 */
				goto out;
			}

		}
		ASSERT(parent);
handle_topo_change:

		mutex_enter(&mpt->m_mutex);
		/*
		 * If HBA is being reset, don't perform operations depending
		 * on the IOC. We must free the topo list, however.
		 */
		if (!mpt->m_in_reset) {
			mptsas_handle_topo_change(topo_node, parent);
		} else {
			NDBG20(("skipping topo change received during reset"));
		}
		save_node = topo_node;
		topo_node = topo_node->next;
		ASSERT(save_node);
		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
		mutex_exit(&mpt->m_mutex);

		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
			/*
			 * If direct attached device associated, make sure
			 * reset the parent before start the next one. But
			 * all devices associated with expander shares the
			 * parent. Also, reset parent if this is for RAID.
			 */
			parent = NULL;
		}
	}
out:
	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
}
6495 6491
/*
 * mptsas_handle_topo_change --
 *	Apply one topology-change node: online a new target/SMP, offline a
 *	departed one, or just free a stale device handle.
 *
 *	topo_node - change descriptor (event, devhdl, flags, cached object)
 *	parent    - iport dip under which child nodes are (de)configured;
 *		    for RAID Phys Disk adds it is located here instead
 *
 *	Called with mpt->m_mutex held and returns with it held; the mutex
 *	is dropped around devinfo-tree and property operations and
 *	re-acquired afterwards.
 */
static void
mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
    dev_info_t *parent)
{
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp = NULL;
	mptsas_t	*mpt = (void *)topo_node->mpt;
	uint16_t	devhdl;
	uint16_t	attached_devhdl;
	uint64_t	sas_wwn = 0;
	int		rval = 0;
	uint32_t	page_address;
	uint8_t		phy, flags;
	char		*addr = NULL;
	dev_info_t	*lundip;
	int		circ = 0, circ1 = 0;
	char		attached_wwnstr[MPTSAS_WWN_STRLEN];

	NDBG20(("mptsas%d handle_topo_change enter, devhdl 0x%x,"
	    "event 0x%x, flags 0x%x", mpt->m_instance, topo_node->devhdl,
	    topo_node->event, topo_node->flags));

	ASSERT(mutex_owned(&mpt->m_mutex));

	switch (topo_node->event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
	{
		char *phy_mask_name;
		mptsas_phymask_t phymask = 0;

		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/*
			 * Get latest RAID info.
			 */
			(void) mptsas_get_raid_info(mpt);
			ptgt = refhash_linear_search(mpt->m_targets,
			    mptsas_target_eval_devhdl, &topo_node->devhdl);
			if (ptgt == NULL)
				break;
		} else {
			ptgt = (void *)topo_node->object;
		}

		if (ptgt == NULL) {
			/*
			 * If a Phys Disk was deleted, RAID info needs to be
			 * updated to reflect the new topology.
			 */
			(void) mptsas_get_raid_info(mpt);

			/*
			 * Get sas device page 0 by DevHandle to make sure if
			 * SSP/SATA end device exist.
			 */
			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
			    topo_node->devhdl;

			rval = mptsas_get_target_device_info(mpt, page_address,
			    &devhdl, &ptgt);
			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: target %d is "
				    "not a SAS/SATA device. \n",
				    topo_node->devhdl);
			} else if (rval == DEV_INFO_FAIL_ALLOC) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "allocate memory. \n");
			} else if (rval == DEV_INFO_FAIL_GUID) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "get SATA GUID for target %d. \n",
				    topo_node->devhdl);
			}
			/*
			 * If rval is DEV_INFO_PHYS_DISK or indicates failure
			 * then there is nothing else to do, just leave.
			 */
			if (rval != DEV_INFO_SUCCESS) {
				return;
			}
		}

		ASSERT(ptgt->m_devhdl == topo_node->devhdl);

		mutex_exit(&mpt->m_mutex);
		flags = topo_node->flags;

		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
			/*
			 * The caller couldn't resolve an iport for a Phys
			 * Disk node; derive it from the target's phymask.
			 */
			phymask = ptgt->m_addr.mta_phymask;
			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
			(void) sprintf(phy_mask_name, "%x", phymask);
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find a "
				    "iport for PD, should not happen!");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			ndi_devi_enter(parent, &circ1);
			(void) mptsas_config_raid(parent, topo_node->devhdl,
			    &lundip);
			ndi_devi_exit(parent, circ1);
		} else {
			/*
			 * hold nexus for bus configure
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ);
			ndi_devi_enter(parent, &circ1);
			rval = mptsas_config_target(parent, ptgt);
			/*
			 * release nexus for bus configure
			 */
			ndi_devi_exit(parent, circ1);
			ndi_devi_exit(scsi_vhci_dip, circ);

			/*
			 * If this is a SATA device, make sure that the
			 * bridge-port (the SAS WWN that the SATA device is
			 * plugged into) is updated. This may change if a SATA
			 * device changes which bay, and therefore phy, it is
			 * plugged into.
			 */
			if (IS_SATA_DEVICE(ptgt->m_deviceinfo)) {
				if (!mptsas_update_sata_bridge(mpt, parent,
				    ptgt)) {
					mutex_enter(&mpt->m_mutex);
					return;
				}
			}

			/*
			 * Add parent's props for SMHBA support
			 */
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				bzero(attached_wwnstr,
				    sizeof (attached_wwnstr));
				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
				    ptgt->m_addr.mta_wwn);
				if (ddi_prop_update_string(DDI_DEV_T_NONE,
				    parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT,
				    attached_wwnstr)
				    != DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent,
					    SCSI_ADDR_PROP_ATTACHED_PORT);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    "attached-port props");
					mutex_enter(&mpt->m_mutex);
					return;
				}
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS, 1) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_NUM_PHYS);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    " create num-phys props");
					mutex_enter(&mpt->m_mutex);
					return;
				}

				/*
				 * Update PHY info for smhba
				 */
				mutex_enter(&mpt->m_mutex);
				if (mptsas_smhba_phy_init(mpt)) {
					/*
					 * Returns with m_mutex held, which
					 * is the state callers expect.
					 */
					mptsas_log(mpt, CE_WARN, "mptsas phy"
					    " update failed");
					return;
				}
				mutex_exit(&mpt->m_mutex);

				/*
				 * topo_node->un.physport is really the PHY#
				 * for direct attached devices
				 */
				mptsas_smhba_set_one_phy_props(mpt, parent,
				    topo_node->un.physport, &attached_devhdl);

				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT, 0) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_VIRTUAL_PORT);
					mptsas_log(mpt, CE_WARN,
					    "mptsas virtual-port"
					    "port prop update failed");
					mutex_enter(&mpt->m_mutex);
					return;
				}
			}
		}
		mutex_enter(&mpt->m_mutex);

		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
		    ptgt->m_addr.mta_phymask));
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
	{
		devhdl = topo_node->devhdl;
		ptgt = refhash_linear_search(mpt->m_targets,
		    mptsas_target_eval_devhdl, &devhdl);
		if (ptgt == NULL)
			break;

		sas_wwn = ptgt->m_addr.mta_wwn;
		phy = ptgt->m_phynum;

		/* Unit address: WWN when one exists, otherwise PHY number. */
		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

		if (sas_wwn) {
			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
		} else {
			(void) sprintf(addr, "p%x", phy);
		}
		ASSERT(ptgt->m_devhdl == devhdl);

		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
		    (topo_node->flags ==
		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Get latest RAID info if RAID volume status changes
			 * or Phys Disk status changes
			 */
			(void) mptsas_get_raid_info(mpt);
		}
		/*
		 * Abort all outstanding command on the device
		 */
		rval = mptsas_do_scsi_reset(mpt, devhdl);
		if (rval) {
			NDBG20(("mptsas%d handle_topo_change to reset target "
			    "before offline devhdl:%x, phymask:%x, rval:%x",
			    mpt->m_instance, ptgt->m_devhdl,
			    ptgt->m_addr.mta_phymask, rval));
		}

		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(scsi_vhci_dip, &circ);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_target(parent, addr);
		ndi_devi_exit(parent, circ1);
		ndi_devi_exit(scsi_vhci_dip, circ);
		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
		    "phymask:%x, rval:%x", mpt->m_instance,
		    ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));

		kmem_free(addr, SCSI_MAXNAMELEN);

		/*
		 * Clear parent's props for SMHBA support
		 */
		flags = topo_node->flags;
		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		mutex_enter(&mpt->m_mutex);
		if (rval == DDI_SUCCESS) {
			refhash_remove(mpt->m_targets, ptgt);
			ptgt = NULL;
		} else {
			/*
			 * clean DR_INTRANSITION flag to allow I/O down to
			 * PHCI driver since failover finished.
			 * Invalidate the devhdl
			 */
			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
			ptgt->m_tgt_unconfigured = 0;
			mutex_enter(&mpt->m_tx_waitq_mutex);
			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
			mutex_exit(&mpt->m_tx_waitq_mutex);
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
			rval = mptsas_free_devhdl(mpt, devhdl);

			NDBG20(("mptsas%d handle_topo_change to remove "
			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
			    rval));
		}

		break;
	}
	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
	{
		devhdl = topo_node->devhdl;
		/*
		 * If this is the remove handle event, do a reset first.
		 */
		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
			rval = mptsas_do_scsi_reset(mpt, devhdl);
			if (rval) {
				NDBG20(("mpt%d reset target before remove "
				    "devhdl:%x, rval:%x", mpt->m_instance,
				    devhdl, rval));
			}
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		rval = mptsas_free_devhdl(mpt, devhdl);
		NDBG20(("mptsas%d handle_topo_change to remove "
		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
		    rval));
		break;
	}
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
	{
		mptsas_smp_t smp;
		dev_info_t *smpdip;

		devhdl = topo_node->devhdl;

		/* Fetch Expander Page 0 to build the new SMP target. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
		if (rval != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "failed to online smp, "
			    "handle %x", devhdl);
			return;
		}

		psmp = mptsas_smp_alloc(mpt, &smp);
		if (psmp == NULL) {
			return;
		}

		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		(void) mptsas_online_smp(parent, psmp, &smpdip);
		ndi_devi_exit(parent, circ1);

		mutex_enter(&mpt->m_mutex);
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_SMP:
	{
		devhdl = topo_node->devhdl;
		uint32_t dev_info;

		psmp = refhash_linear_search(mpt->m_smp_targets,
		    mptsas_smp_eval_devhdl, &devhdl);
		if (psmp == NULL)
			break;
		/*
		 * The mptsas_smp_t data is released only if the dip is offlined
		 * successfully.
		 */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_smp(parent, psmp);
		ndi_devi_exit(parent, circ1);

		dev_info = psmp->m_deviceinfo;
		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
		    DEVINFO_DIRECT_ATTACHED) {
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				return;
			}
			/*
			 * Check whether the smp connected to the iport,
			 */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys"
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				return;
			}
			/*
			 * Clear parent's attached-port props
			 */
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				mutex_enter(&mpt->m_mutex);
				return;
			}
		}

		mutex_enter(&mpt->m_mutex);
		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
		if (rval == DDI_SUCCESS) {
			refhash_remove(mpt->m_smp_targets, psmp);
		} else {
			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		}

		bzero(attached_wwnstr, sizeof (attached_wwnstr));

		break;
	}
	default:
		return;
	}
}
6956 6952
6957 6953 /*
6958 6954 * Record the event if its type is enabled in mpt instance by ioctl.
6959 6955 */
6960 6956 static void
6961 6957 mptsas_record_event(void *args)
6962 6958 {
6963 6959 m_replyh_arg_t *replyh_arg;
6964 6960 pMpi2EventNotificationReply_t eventreply;
6965 6961 uint32_t event, rfm;
6966 6962 mptsas_t *mpt;
6967 6963 int i, j;
6968 6964 uint16_t event_data_len;
6969 6965 boolean_t sendAEN = FALSE;
6970 6966
6971 6967 replyh_arg = (m_replyh_arg_t *)args;
6972 6968 rfm = replyh_arg->rfm;
6973 6969 mpt = replyh_arg->mpt;
6974 6970
6975 6971 eventreply = (pMpi2EventNotificationReply_t)
6976 6972 (mpt->m_reply_frame + (rfm -
6977 6973 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
6978 6974 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6979 6975
6980 6976
6981 6977 /*
6982 6978 * Generate a system event to let anyone who cares know that a
6983 6979 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6984 6980 * event mask is set to.
6985 6981 */
6986 6982 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6987 6983 sendAEN = TRUE;
6988 6984 }
6989 6985
6990 6986 /*
6991 6987 * Record the event only if it is not masked. Determine which dword
6992 6988 * and bit of event mask to test.
6993 6989 */
6994 6990 i = (uint8_t)(event / 32);
6995 6991 j = (uint8_t)(event % 32);
6996 6992 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6997 6993 i = mpt->m_event_index;
6998 6994 mpt->m_events[i].Type = event;
6999 6995 mpt->m_events[i].Number = ++mpt->m_event_number;
7000 6996 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
7001 6997 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
7002 6998 &eventreply->EventDataLength);
7003 6999
7004 7000 if (event_data_len > 0) {
7005 7001 /*
7006 7002 * Limit data to size in m_event entry
7007 7003 */
7008 7004 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
7009 7005 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
7010 7006 }
7011 7007 for (j = 0; j < event_data_len; j++) {
7012 7008 mpt->m_events[i].Data[j] =
7013 7009 ddi_get32(mpt->m_acc_reply_frame_hdl,
7014 7010 &(eventreply->EventData[j]));
7015 7011 }
7016 7012
7017 7013 /*
7018 7014 * check for index wrap-around
7019 7015 */
7020 7016 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
7021 7017 i = 0;
7022 7018 }
7023 7019 mpt->m_event_index = (uint8_t)i;
7024 7020
7025 7021 /*
7026 7022 * Set flag to send the event.
7027 7023 */
7028 7024 sendAEN = TRUE;
7029 7025 }
7030 7026 }
7031 7027
7032 7028 /*
7033 7029 * Generate a system event if flag is set to let anyone who cares know
7034 7030 * that an event has occurred.
7035 7031 */
7036 7032 if (sendAEN) {
7037 7033 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
7038 7034 "SAS", NULL, NULL, DDI_NOSLEEP);
7039 7035 }
7040 7036 }
7041 7037
7042 7038 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
7043 7039 /*
7044 7040 * handle sync events from ioc in interrupt
7045 7041 * return value:
7046 7042 * DDI_SUCCESS: The event is handled by this func
7047 7043 * DDI_FAILURE: Event is not handled
7048 7044 */
7049 7045 static int
7050 7046 mptsas_handle_event_sync(void *args)
7051 7047 {
7052 7048 m_replyh_arg_t *replyh_arg;
7053 7049 pMpi2EventNotificationReply_t eventreply;
7054 7050 uint32_t event, rfm;
7055 7051 mptsas_t *mpt;
7056 7052 uint_t iocstatus;
7057 7053
7058 7054 replyh_arg = (m_replyh_arg_t *)args;
7059 7055 rfm = replyh_arg->rfm;
7060 7056 mpt = replyh_arg->mpt;
7061 7057
7062 7058 ASSERT(mutex_owned(&mpt->m_mutex));
7063 7059
7064 7060 eventreply = (pMpi2EventNotificationReply_t)
7065 7061 (mpt->m_reply_frame + (rfm -
7066 7062 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7067 7063 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7068 7064
7069 7065 if ((iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7070 7066 &eventreply->IOCStatus)) != 0) {
7071 7067 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7072 7068 mptsas_log(mpt, CE_WARN,
7073 7069 "!mptsas_handle_event_sync: event 0x%x, "
7074 7070 "IOCStatus=0x%x, "
7075 7071 "IOCLogInfo=0x%x", event, iocstatus,
7076 7072 ddi_get32(mpt->m_acc_reply_frame_hdl,
7077 7073 &eventreply->IOCLogInfo));
7078 7074 } else {
7079 7075 mptsas_log(mpt, CE_WARN,
7080 7076 "mptsas_handle_event_sync: event 0x%x, "
7081 7077 "IOCStatus=0x%x, "
7082 7078 "(IOCLogInfo=0x%x)", event, iocstatus,
7083 7079 ddi_get32(mpt->m_acc_reply_frame_hdl,
7084 7080 &eventreply->IOCLogInfo));
7085 7081 }
7086 7082 }
7087 7083
7088 7084 /*
7089 7085 * figure out what kind of event we got and handle accordingly
7090 7086 */
7091 7087 switch (event) {
7092 7088 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7093 7089 {
7094 7090 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
7095 7091 uint8_t num_entries, expstatus, phy;
7096 7092 uint8_t phystatus, physport, state, i;
7097 7093 uint8_t start_phy_num, link_rate;
7098 7094 uint16_t dev_handle, reason_code;
7099 7095 uint16_t enc_handle, expd_handle;
7100 7096 char string[80], curr[80], prev[80];
7101 7097 mptsas_topo_change_list_t *topo_head = NULL;
7102 7098 mptsas_topo_change_list_t *topo_tail = NULL;
7103 7099 mptsas_topo_change_list_t *topo_node = NULL;
7104 7100 mptsas_target_t *ptgt;
7105 7101 mptsas_smp_t *psmp;
7106 7102 uint8_t flags = 0, exp_flag;
7107 7103 smhba_info_t *pSmhba = NULL;
7108 7104
7109 7105 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
7110 7106
7111 7107 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
7112 7108 eventreply->EventData;
7113 7109
7114 7110 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7115 7111 &sas_topo_change_list->EnclosureHandle);
7116 7112 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7117 7113 &sas_topo_change_list->ExpanderDevHandle);
7118 7114 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7119 7115 &sas_topo_change_list->NumEntries);
7120 7116 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7121 7117 &sas_topo_change_list->StartPhyNum);
7122 7118 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
7123 7119 &sas_topo_change_list->ExpStatus);
7124 7120 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
7125 7121 &sas_topo_change_list->PhysicalPort);
7126 7122
7127 7123 string[0] = 0;
7128 7124 if (expd_handle) {
7129 7125 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
7130 7126 switch (expstatus) {
7131 7127 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7132 7128 (void) sprintf(string, " added");
7133 7129 /*
7134 7130 * New expander device added
7135 7131 */
7136 7132 mpt->m_port_chng = 1;
7137 7133 topo_node = kmem_zalloc(
7138 7134 sizeof (mptsas_topo_change_list_t),
7139 7135 KM_SLEEP);
7140 7136 topo_node->mpt = mpt;
7141 7137 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
7142 7138 topo_node->un.physport = physport;
7143 7139 topo_node->devhdl = expd_handle;
7144 7140 topo_node->flags = flags;
7145 7141 topo_node->object = NULL;
7146 7142 if (topo_head == NULL) {
7147 7143 topo_head = topo_tail = topo_node;
7148 7144 } else {
7149 7145 topo_tail->next = topo_node;
7150 7146 topo_tail = topo_node;
7151 7147 }
7152 7148 break;
7153 7149 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7154 7150 (void) sprintf(string, " not responding, "
7155 7151 "removed");
7156 7152 psmp = refhash_linear_search(mpt->m_smp_targets,
7157 7153 mptsas_smp_eval_devhdl, &expd_handle);
7158 7154 if (psmp == NULL)
7159 7155 break;
7160 7156
7161 7157 topo_node = kmem_zalloc(
7162 7158 sizeof (mptsas_topo_change_list_t),
7163 7159 KM_SLEEP);
7164 7160 topo_node->mpt = mpt;
7165 7161 topo_node->un.phymask =
7166 7162 psmp->m_addr.mta_phymask;
7167 7163 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
7168 7164 topo_node->devhdl = expd_handle;
7169 7165 topo_node->flags = flags;
7170 7166 topo_node->object = NULL;
7171 7167 if (topo_head == NULL) {
7172 7168 topo_head = topo_tail = topo_node;
7173 7169 } else {
7174 7170 topo_tail->next = topo_node;
7175 7171 topo_tail = topo_node;
7176 7172 }
7177 7173 break;
7178 7174 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7179 7175 break;
7180 7176 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7181 7177 (void) sprintf(string, " not responding, "
7182 7178 "delaying removal");
7183 7179 break;
7184 7180 default:
7185 7181 break;
7186 7182 }
7187 7183 } else {
7188 7184 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
7189 7185 }
7190 7186
7191 7187 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
7192 7188 enc_handle, expd_handle, string));
7193 7189 for (i = 0; i < num_entries; i++) {
7194 7190 phy = i + start_phy_num;
7195 7191 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
7196 7192 &sas_topo_change_list->PHY[i].PhyStatus);
7197 7193 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7198 7194 &sas_topo_change_list->PHY[i].AttachedDevHandle);
7199 7195 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
7200 7196 /*
7201 7197 * Filter out processing of Phy Vacant Status unless
7202 7198 * the reason code is "Not Responding". Process all
7203 7199 * other combinations of Phy Status and Reason Codes.
7204 7200 */
7205 7201 if ((phystatus &
7206 7202 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
7207 7203 (reason_code !=
7208 7204 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
7209 7205 continue;
7210 7206 }
7211 7207 curr[0] = 0;
7212 7208 prev[0] = 0;
7213 7209 string[0] = 0;
7214 7210 switch (reason_code) {
7215 7211 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7216 7212 {
7217 7213 NDBG20(("mptsas%d phy %d physical_port %d "
7218 7214 "dev_handle %d added", mpt->m_instance, phy,
7219 7215 physport, dev_handle));
7220 7216 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7221 7217 &sas_topo_change_list->PHY[i].LinkRate);
7222 7218 state = (link_rate &
7223 7219 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7224 7220 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7225 7221 switch (state) {
7226 7222 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7227 7223 (void) sprintf(curr, "is disabled");
7228 7224 break;
7229 7225 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7230 7226 (void) sprintf(curr, "is offline, "
7231 7227 "failed speed negotiation");
7232 7228 break;
7233 7229 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7234 7230 (void) sprintf(curr, "SATA OOB "
7235 7231 "complete");
7236 7232 break;
7237 7233 case SMP_RESET_IN_PROGRESS:
7238 7234 (void) sprintf(curr, "SMP reset in "
7239 7235 "progress");
7240 7236 break;
7241 7237 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7242 7238 (void) sprintf(curr, "is online at "
7243 7239 "1.5 Gbps");
7244 7240 break;
7245 7241 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7246 7242 (void) sprintf(curr, "is online at 3.0 "
7247 7243 "Gbps");
7248 7244 break;
7249 7245 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7250 7246 (void) sprintf(curr, "is online at 6.0 "
7251 7247 "Gbps");
7252 7248 break;
7253 7249 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7254 7250 (void) sprintf(curr,
7255 7251 "is online at 12.0 Gbps");
7256 7252 break;
7257 7253 default:
7258 7254 (void) sprintf(curr, "state is "
7259 7255 "unknown");
7260 7256 break;
7261 7257 }
7262 7258 /*
7263 7259 * New target device added into the system.
7264 7260 * Set association flag according to if an
7265 7261 * expander is used or not.
7266 7262 */
7267 7263 exp_flag =
7268 7264 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7269 7265 if (flags ==
7270 7266 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7271 7267 flags = exp_flag;
7272 7268 }
7273 7269 topo_node = kmem_zalloc(
7274 7270 sizeof (mptsas_topo_change_list_t),
7275 7271 KM_SLEEP);
7276 7272 topo_node->mpt = mpt;
7277 7273 topo_node->event =
7278 7274 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7279 7275 if (expd_handle == 0) {
7280 7276 /*
7281 7277 * Per MPI 2, if expander dev handle
7282 7278 * is 0, it's a directly attached
7283 7279 * device. So driver use PHY to decide
7284 7280 * which iport is associated
7285 7281 */
7286 7282 physport = phy;
7287 7283 mpt->m_port_chng = 1;
7288 7284 }
7289 7285 topo_node->un.physport = physport;
7290 7286 topo_node->devhdl = dev_handle;
7291 7287 topo_node->flags = flags;
7292 7288 topo_node->object = NULL;
7293 7289 if (topo_head == NULL) {
7294 7290 topo_head = topo_tail = topo_node;
7295 7291 } else {
7296 7292 topo_tail->next = topo_node;
7297 7293 topo_tail = topo_node;
7298 7294 }
7299 7295 break;
7300 7296 }
7301 7297 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7302 7298 {
7303 7299 NDBG20(("mptsas%d phy %d physical_port %d "
7304 7300 "dev_handle %d removed", mpt->m_instance,
7305 7301 phy, physport, dev_handle));
7306 7302 /*
7307 7303 * Set association flag according to if an
7308 7304 * expander is used or not.
7309 7305 */
7310 7306 exp_flag =
7311 7307 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7312 7308 if (flags ==
7313 7309 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7314 7310 flags = exp_flag;
7315 7311 }
7316 7312 /*
7317 7313 * Target device is removed from the system
7318 7314 					 * Before the device is really offline from
7319 7315 					 * the system.
7320 7316 */
7321 7317 ptgt = refhash_linear_search(mpt->m_targets,
7322 7318 mptsas_target_eval_devhdl, &dev_handle);
7323 7319 /*
7324 7320 * If ptgt is NULL here, it means that the
7325 7321 * DevHandle is not in the hash table. This is
7326 7322 * reasonable sometimes. For example, if a
7327 7323 * disk was pulled, then added, then pulled
7328 7324 * again, the disk will not have been put into
7329 7325 * the hash table because the add event will
7330 7326 * have an invalid phymask. BUT, this does not
7331 7327 * mean that the DevHandle is invalid. The
7332 7328 * controller will still have a valid DevHandle
7333 7329 * that must be removed. To do this, use the
7334 7330 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
7335 7331 */
7336 7332 if (ptgt == NULL) {
7337 7333 topo_node = kmem_zalloc(
7338 7334 sizeof (mptsas_topo_change_list_t),
7339 7335 KM_SLEEP);
7340 7336 topo_node->mpt = mpt;
7341 7337 topo_node->un.phymask = 0;
7342 7338 topo_node->event =
7343 7339 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7344 7340 topo_node->devhdl = dev_handle;
7345 7341 topo_node->flags = flags;
7346 7342 topo_node->object = NULL;
7347 7343 if (topo_head == NULL) {
7348 7344 topo_head = topo_tail =
7349 7345 topo_node;
7350 7346 } else {
7351 7347 topo_tail->next = topo_node;
7352 7348 topo_tail = topo_node;
7353 7349 }
7354 7350 break;
7355 7351 }
7356 7352
7357 7353 /*
7358 7354 			 * Update DR flag immediately to avoid I/O failure
7359 7355 * before failover finish. Pay attention to the
7360 7356 * mutex protect, we need grab m_tx_waitq_mutex
7361 7357 * during set m_dr_flag because we won't add
7362 7358 * the following command into waitq, instead,
7363 7359 * we need return TRAN_BUSY in the tran_start
7364 7360 * context.
7365 7361 */
7366 7362 mutex_enter(&mpt->m_tx_waitq_mutex);
7367 7363 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7368 7364 mutex_exit(&mpt->m_tx_waitq_mutex);
7369 7365
7370 7366 topo_node = kmem_zalloc(
7371 7367 sizeof (mptsas_topo_change_list_t),
7372 7368 KM_SLEEP);
7373 7369 topo_node->mpt = mpt;
7374 7370 topo_node->un.phymask =
7375 7371 ptgt->m_addr.mta_phymask;
7376 7372 topo_node->event =
7377 7373 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7378 7374 topo_node->devhdl = dev_handle;
7379 7375 topo_node->flags = flags;
7380 7376 topo_node->object = NULL;
7381 7377 if (topo_head == NULL) {
7382 7378 topo_head = topo_tail = topo_node;
7383 7379 } else {
7384 7380 topo_tail->next = topo_node;
7385 7381 topo_tail = topo_node;
7386 7382 }
7387 7383 break;
7388 7384 }
7389 7385 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7390 7386 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7391 7387 &sas_topo_change_list->PHY[i].LinkRate);
7392 7388 state = (link_rate &
7393 7389 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7394 7390 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7395 7391 pSmhba = &mpt->m_phy_info[i].smhba_info;
7396 7392 pSmhba->negotiated_link_rate = state;
7397 7393 switch (state) {
7398 7394 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7399 7395 (void) sprintf(curr, "is disabled");
7400 7396 mptsas_smhba_log_sysevent(mpt,
7401 7397 ESC_SAS_PHY_EVENT,
7402 7398 SAS_PHY_REMOVE,
7403 7399 &mpt->m_phy_info[i].smhba_info);
7404 7400 mpt->m_phy_info[i].smhba_info.
7405 7401 negotiated_link_rate
7406 7402 = 0x1;
7407 7403 break;
7408 7404 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7409 7405 (void) sprintf(curr, "is offline, "
7410 7406 "failed speed negotiation");
7411 7407 mptsas_smhba_log_sysevent(mpt,
7412 7408 ESC_SAS_PHY_EVENT,
7413 7409 SAS_PHY_OFFLINE,
7414 7410 &mpt->m_phy_info[i].smhba_info);
7415 7411 break;
7416 7412 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7417 7413 (void) sprintf(curr, "SATA OOB "
7418 7414 "complete");
7419 7415 break;
7420 7416 case SMP_RESET_IN_PROGRESS:
7421 7417 (void) sprintf(curr, "SMP reset in "
7422 7418 "progress");
7423 7419 break;
7424 7420 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7425 7421 (void) sprintf(curr, "is online at "
7426 7422 "1.5 Gbps");
7427 7423 if ((expd_handle == 0) &&
7428 7424 (enc_handle == 1)) {
7429 7425 mpt->m_port_chng = 1;
7430 7426 }
7431 7427 mptsas_smhba_log_sysevent(mpt,
7432 7428 ESC_SAS_PHY_EVENT,
7433 7429 SAS_PHY_ONLINE,
7434 7430 &mpt->m_phy_info[i].smhba_info);
7435 7431 break;
7436 7432 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7437 7433 (void) sprintf(curr, "is online at 3.0 "
7438 7434 "Gbps");
7439 7435 if ((expd_handle == 0) &&
7440 7436 (enc_handle == 1)) {
7441 7437 mpt->m_port_chng = 1;
7442 7438 }
7443 7439 mptsas_smhba_log_sysevent(mpt,
7444 7440 ESC_SAS_PHY_EVENT,
7445 7441 SAS_PHY_ONLINE,
7446 7442 &mpt->m_phy_info[i].smhba_info);
7447 7443 break;
7448 7444 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7449 7445 (void) sprintf(curr, "is online at "
7450 7446 "6.0 Gbps");
7451 7447 if ((expd_handle == 0) &&
7452 7448 (enc_handle == 1)) {
7453 7449 mpt->m_port_chng = 1;
7454 7450 }
7455 7451 mptsas_smhba_log_sysevent(mpt,
7456 7452 ESC_SAS_PHY_EVENT,
7457 7453 SAS_PHY_ONLINE,
7458 7454 &mpt->m_phy_info[i].smhba_info);
7459 7455 break;
7460 7456 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7461 7457 (void) sprintf(curr, "is online at "
7462 7458 "12.0 Gbps");
7463 7459 if ((expd_handle == 0) &&
7464 7460 (enc_handle == 1)) {
7465 7461 mpt->m_port_chng = 1;
7466 7462 }
7467 7463 mptsas_smhba_log_sysevent(mpt,
7468 7464 ESC_SAS_PHY_EVENT,
7469 7465 SAS_PHY_ONLINE,
7470 7466 &mpt->m_phy_info[i].smhba_info);
7471 7467 break;
7472 7468 default:
7473 7469 (void) sprintf(curr, "state is "
7474 7470 "unknown");
7475 7471 break;
7476 7472 }
7477 7473
7478 7474 state = (link_rate &
7479 7475 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
7480 7476 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
7481 7477 switch (state) {
7482 7478 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7483 7479 (void) sprintf(prev, ", was disabled");
7484 7480 break;
7485 7481 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7486 7482 (void) sprintf(prev, ", was offline, "
7487 7483 "failed speed negotiation");
7488 7484 break;
7489 7485 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7490 7486 (void) sprintf(prev, ", was SATA OOB "
7491 7487 "complete");
7492 7488 break;
7493 7489 case SMP_RESET_IN_PROGRESS:
7494 7490 (void) sprintf(prev, ", was SMP reset "
7495 7491 "in progress");
7496 7492 break;
7497 7493 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7498 7494 (void) sprintf(prev, ", was online at "
7499 7495 "1.5 Gbps");
7500 7496 break;
7501 7497 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7502 7498 (void) sprintf(prev, ", was online at "
7503 7499 "3.0 Gbps");
7504 7500 break;
7505 7501 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7506 7502 (void) sprintf(prev, ", was online at "
7507 7503 "6.0 Gbps");
7508 7504 break;
7509 7505 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7510 7506 (void) sprintf(prev, ", was online at "
7511 7507 "12.0 Gbps");
7512 7508 break;
7513 7509 default:
7514 7510 break;
7515 7511 }
7516 7512 (void) sprintf(&string[strlen(string)], "link "
7517 7513 "changed, ");
7518 7514 break;
7519 7515 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7520 7516 continue;
7521 7517 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7522 7518 (void) sprintf(&string[strlen(string)],
7523 7519 "target not responding, delaying "
7524 7520 "removal");
7525 7521 break;
7526 7522 }
7527 7523 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7528 7524 mpt->m_instance, phy, dev_handle, string, curr,
7529 7525 prev));
7530 7526 }
7531 7527 if (topo_head != NULL) {
7532 7528 /*
7533 7529 * Launch DR taskq to handle topology change
7534 7530 */
7535 7531 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7536 7532 mptsas_handle_dr, (void *)topo_head,
7537 7533 DDI_NOSLEEP)) != DDI_SUCCESS) {
7538 7534 while (topo_head != NULL) {
7539 7535 topo_node = topo_head;
7540 7536 topo_head = topo_head->next;
7541 7537 kmem_free(topo_node,
7542 7538 sizeof (mptsas_topo_change_list_t));
7543 7539 }
7544 7540 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7545 7541 "for handle SAS DR event failed. \n");
7546 7542 }
7547 7543 }
7548 7544 break;
7549 7545 }
7550 7546 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7551 7547 {
7552 7548 Mpi2EventDataIrConfigChangeList_t *irChangeList;
7553 7549 mptsas_topo_change_list_t *topo_head = NULL;
7554 7550 mptsas_topo_change_list_t *topo_tail = NULL;
7555 7551 mptsas_topo_change_list_t *topo_node = NULL;
7556 7552 mptsas_target_t *ptgt;
7557 7553 uint8_t num_entries, i, reason;
7558 7554 uint16_t volhandle, diskhandle;
7559 7555
7560 7556 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7561 7557 eventreply->EventData;
7562 7558 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7563 7559 &irChangeList->NumElements);
7564 7560
7565 7561 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7566 7562 mpt->m_instance));
7567 7563
7568 7564 for (i = 0; i < num_entries; i++) {
7569 7565 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7570 7566 &irChangeList->ConfigElement[i].ReasonCode);
7571 7567 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7572 7568 &irChangeList->ConfigElement[i].VolDevHandle);
7573 7569 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7574 7570 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7575 7571
7576 7572 switch (reason) {
7577 7573 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7578 7574 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7579 7575 {
7580 7576 NDBG20(("mptsas %d volume added\n",
7581 7577 mpt->m_instance));
7582 7578
7583 7579 topo_node = kmem_zalloc(
7584 7580 sizeof (mptsas_topo_change_list_t),
7585 7581 KM_SLEEP);
7586 7582
7587 7583 topo_node->mpt = mpt;
7588 7584 topo_node->event =
7589 7585 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7590 7586 topo_node->un.physport = 0xff;
7591 7587 topo_node->devhdl = volhandle;
7592 7588 topo_node->flags =
7593 7589 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7594 7590 topo_node->object = NULL;
7595 7591 if (topo_head == NULL) {
7596 7592 topo_head = topo_tail = topo_node;
7597 7593 } else {
7598 7594 topo_tail->next = topo_node;
7599 7595 topo_tail = topo_node;
7600 7596 }
7601 7597 break;
7602 7598 }
7603 7599 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7604 7600 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7605 7601 {
7606 7602 NDBG20(("mptsas %d volume deleted\n",
7607 7603 mpt->m_instance));
7608 7604 ptgt = refhash_linear_search(mpt->m_targets,
7609 7605 mptsas_target_eval_devhdl, &volhandle);
7610 7606 if (ptgt == NULL)
7611 7607 break;
7612 7608
7613 7609 /*
7614 7610 * Clear any flags related to volume
7615 7611 */
7616 7612 (void) mptsas_delete_volume(mpt, volhandle);
7617 7613
7618 7614 /*
7619 7615 * Update DR flag immediately avoid I/O failure
7620 7616 */
7621 7617 mutex_enter(&mpt->m_tx_waitq_mutex);
7622 7618 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7623 7619 mutex_exit(&mpt->m_tx_waitq_mutex);
7624 7620
7625 7621 topo_node = kmem_zalloc(
7626 7622 sizeof (mptsas_topo_change_list_t),
7627 7623 KM_SLEEP);
7628 7624 topo_node->mpt = mpt;
7629 7625 topo_node->un.phymask =
7630 7626 ptgt->m_addr.mta_phymask;
7631 7627 topo_node->event =
7632 7628 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7633 7629 topo_node->devhdl = volhandle;
7634 7630 topo_node->flags =
7635 7631 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7636 7632 topo_node->object = (void *)ptgt;
7637 7633 if (topo_head == NULL) {
7638 7634 topo_head = topo_tail = topo_node;
7639 7635 } else {
7640 7636 topo_tail->next = topo_node;
7641 7637 topo_tail = topo_node;
7642 7638 }
7643 7639 break;
7644 7640 }
7645 7641 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7646 7642 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7647 7643 {
7648 7644 ptgt = refhash_linear_search(mpt->m_targets,
7649 7645 mptsas_target_eval_devhdl, &diskhandle);
7650 7646 if (ptgt == NULL)
7651 7647 break;
7652 7648
7653 7649 /*
7654 7650 * Update DR flag immediately avoid I/O failure
7655 7651 */
7656 7652 mutex_enter(&mpt->m_tx_waitq_mutex);
7657 7653 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7658 7654 mutex_exit(&mpt->m_tx_waitq_mutex);
7659 7655
7660 7656 topo_node = kmem_zalloc(
7661 7657 sizeof (mptsas_topo_change_list_t),
7662 7658 KM_SLEEP);
7663 7659 topo_node->mpt = mpt;
7664 7660 topo_node->un.phymask =
7665 7661 ptgt->m_addr.mta_phymask;
7666 7662 topo_node->event =
7667 7663 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7668 7664 topo_node->devhdl = diskhandle;
7669 7665 topo_node->flags =
7670 7666 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7671 7667 topo_node->object = (void *)ptgt;
7672 7668 if (topo_head == NULL) {
7673 7669 topo_head = topo_tail = topo_node;
7674 7670 } else {
7675 7671 topo_tail->next = topo_node;
7676 7672 topo_tail = topo_node;
7677 7673 }
7678 7674 break;
7679 7675 }
7680 7676 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7681 7677 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7682 7678 {
7683 7679 /*
7684 7680 * The physical drive is released by a IR
7685 7681 				 * volume. But we cannot get the physport
7686 7682 * or phynum from the event data, so we only
7687 7683 * can get the physport/phynum after SAS
7688 7684 * Device Page0 request for the devhdl.
7689 7685 */
7690 7686 topo_node = kmem_zalloc(
7691 7687 sizeof (mptsas_topo_change_list_t),
7692 7688 KM_SLEEP);
7693 7689 topo_node->mpt = mpt;
7694 7690 topo_node->un.phymask = 0;
7695 7691 topo_node->event =
7696 7692 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7697 7693 topo_node->devhdl = diskhandle;
7698 7694 topo_node->flags =
7699 7695 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7700 7696 topo_node->object = NULL;
7701 7697 mpt->m_port_chng = 1;
7702 7698 if (topo_head == NULL) {
7703 7699 topo_head = topo_tail = topo_node;
7704 7700 } else {
7705 7701 topo_tail->next = topo_node;
7706 7702 topo_tail = topo_node;
7707 7703 }
7708 7704 break;
7709 7705 }
7710 7706 default:
7711 7707 break;
7712 7708 }
7713 7709 }
7714 7710
7715 7711 if (topo_head != NULL) {
7716 7712 /*
7717 7713 * Launch DR taskq to handle topology change
7718 7714 */
7719 7715 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7720 7716 mptsas_handle_dr, (void *)topo_head,
7721 7717 DDI_NOSLEEP)) != DDI_SUCCESS) {
7722 7718 while (topo_head != NULL) {
7723 7719 topo_node = topo_head;
7724 7720 topo_head = topo_head->next;
7725 7721 kmem_free(topo_node,
7726 7722 sizeof (mptsas_topo_change_list_t));
7727 7723 }
7728 7724 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7729 7725 "for handle SAS DR event failed. \n");
7730 7726 }
7731 7727 }
7732 7728 break;
7733 7729 }
7734 7730 default:
7735 7731 return (DDI_FAILURE);
7736 7732 }
7737 7733
7738 7734 return (DDI_SUCCESS);
7739 7735 }
7740 7736
7741 7737 /*
7742 7738 * handle events from ioc
7743 7739 */
7744 7740 static void
7745 7741 mptsas_handle_event(void *args)
7746 7742 {
7747 7743 m_replyh_arg_t *replyh_arg;
7748 7744 pMpi2EventNotificationReply_t eventreply;
7749 7745 uint32_t event, iocloginfo, rfm;
7750 7746 uint32_t status;
7751 7747 uint8_t port;
7752 7748 mptsas_t *mpt;
7753 7749 uint_t iocstatus;
7754 7750
7755 7751 replyh_arg = (m_replyh_arg_t *)args;
7756 7752 rfm = replyh_arg->rfm;
7757 7753 mpt = replyh_arg->mpt;
7758 7754
7759 7755 mutex_enter(&mpt->m_mutex);
7760 7756 /*
7761 7757 * If HBA is being reset, drop incoming event.
7762 7758 */
7763 7759 if (mpt->m_in_reset) {
7764 7760 NDBG20(("dropping event received prior to reset"));
7765 7761 mutex_exit(&mpt->m_mutex);
7766 7762 return;
7767 7763 }
7768 7764
7769 7765 eventreply = (pMpi2EventNotificationReply_t)
7770 7766 (mpt->m_reply_frame + (rfm -
7771 7767 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7772 7768 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7773 7769
7774 7770 if ((iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7775 7771 &eventreply->IOCStatus)) != 0) {
7776 7772 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7777 7773 mptsas_log(mpt, CE_WARN,
7778 7774 "!mptsas_handle_event: IOCStatus=0x%x, "
7779 7775 "IOCLogInfo=0x%x", iocstatus,
7780 7776 ddi_get32(mpt->m_acc_reply_frame_hdl,
7781 7777 &eventreply->IOCLogInfo));
7782 7778 } else {
7783 7779 mptsas_log(mpt, CE_WARN,
7784 7780 "mptsas_handle_event: IOCStatus=0x%x, "
7785 7781 "IOCLogInfo=0x%x", iocstatus,
7786 7782 ddi_get32(mpt->m_acc_reply_frame_hdl,
7787 7783 &eventreply->IOCLogInfo));
7788 7784 }
7789 7785 }
7790 7786
7791 7787 /*
7792 7788 * figure out what kind of event we got and handle accordingly
7793 7789 */
7794 7790 switch (event) {
7795 7791 case MPI2_EVENT_LOG_ENTRY_ADDED:
7796 7792 break;
7797 7793 case MPI2_EVENT_LOG_DATA:
7798 7794 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7799 7795 &eventreply->IOCLogInfo);
7800 7796 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7801 7797 iocloginfo));
7802 7798 break;
7803 7799 case MPI2_EVENT_STATE_CHANGE:
7804 7800 NDBG20(("mptsas%d state change.", mpt->m_instance));
7805 7801 break;
7806 7802 case MPI2_EVENT_HARD_RESET_RECEIVED:
7807 7803 NDBG20(("mptsas%d event change.", mpt->m_instance));
7808 7804 break;
7809 7805 case MPI2_EVENT_SAS_DISCOVERY:
7810 7806 {
7811 7807 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7812 7808 char string[80];
7813 7809 uint8_t rc;
7814 7810
7815 7811 sasdiscovery =
7816 7812 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7817 7813
7818 7814 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7819 7815 &sasdiscovery->ReasonCode);
7820 7816 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7821 7817 &sasdiscovery->PhysicalPort);
7822 7818 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7823 7819 &sasdiscovery->DiscoveryStatus);
7824 7820
7825 7821 string[0] = 0;
7826 7822 switch (rc) {
7827 7823 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7828 7824 (void) sprintf(string, "STARTING");
7829 7825 break;
7830 7826 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7831 7827 (void) sprintf(string, "COMPLETED");
7832 7828 break;
7833 7829 default:
7834 7830 (void) sprintf(string, "UNKNOWN");
7835 7831 break;
7836 7832 }
7837 7833
7838 7834 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7839 7835 port, status));
7840 7836
7841 7837 break;
7842 7838 }
7843 7839 case MPI2_EVENT_EVENT_CHANGE:
7844 7840 NDBG20(("mptsas%d event change.", mpt->m_instance));
7845 7841 break;
7846 7842 case MPI2_EVENT_TASK_SET_FULL:
7847 7843 {
7848 7844 pMpi2EventDataTaskSetFull_t taskfull;
7849 7845
7850 7846 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7851 7847
7852 7848 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7853 7849 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7854 7850 &taskfull->CurrentDepth)));
7855 7851 break;
7856 7852 }
7857 7853 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7858 7854 {
7859 7855 /*
7860 7856 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7861 7857 * in mptsas_handle_event_sync() of interrupt context
7862 7858 */
7863 7859 break;
7864 7860 }
7865 7861 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7866 7862 {
7867 7863 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7868 7864 uint8_t rc;
7869 7865 uint16_t enchdl;
7870 7866 char string[80];
7871 7867 mptsas_enclosure_t *mep;
7872 7868
7873 7869 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7874 7870 eventreply->EventData;
7875 7871
7876 7872 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7877 7873 &encstatus->ReasonCode);
7878 7874 enchdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7879 7875 &encstatus->EnclosureHandle);
7880 7876
7881 7877 switch (rc) {
7882 7878 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7883 7879 (void) sprintf(string, "added");
7884 7880 break;
7885 7881 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7886 7882 mep = mptsas_enc_lookup(mpt, enchdl);
7887 7883 if (mep != NULL) {
7888 7884 list_remove(&mpt->m_enclosures, mep);
7889 7885 mptsas_enc_free(mep);
7890 7886 mep = NULL;
7891 7887 }
7892 7888 (void) sprintf(string, ", not responding");
7893 7889 break;
7894 7890 default:
7895 7891 break;
7896 7892 }
7897 7893 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure "
7898 7894 "%x%s\n", mpt->m_instance,
7899 7895 ddi_get16(mpt->m_acc_reply_frame_hdl,
7900 7896 &encstatus->EnclosureHandle), string));
7901 7897
7902 7898 /*
7903 7899 * No matter what has happened, update all of our device state
7904 7900 * for enclosures, by retriggering an evaluation.
7905 7901 */
7906 7902 mpt->m_done_traverse_enc = 0;
7907 7903 mptsas_update_hashtab(mpt);
7908 7904 break;
7909 7905 }
7910 7906
7911 7907 /*
7912 7908 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7913 7909 * mptsas_handle_event_sync,in here just send ack message.
7914 7910 */
7915 7911 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7916 7912 {
7917 7913 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7918 7914 uint8_t rc;
7919 7915 uint16_t devhdl;
7920 7916 uint64_t wwn = 0;
7921 7917 uint32_t wwn_lo, wwn_hi;
7922 7918
7923 7919 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7924 7920 eventreply->EventData;
7925 7921 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7926 7922 &statuschange->ReasonCode);
7927 7923 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7928 7924 (uint32_t *)(void *)&statuschange->SASAddress);
7929 7925 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7930 7926 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7931 7927 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7932 7928 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7933 7929 &statuschange->DevHandle);
7934 7930
7935 7931 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7936 7932 wwn));
7937 7933
7938 7934 switch (rc) {
7939 7935 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7940 7936 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7941 7937 ddi_get8(mpt->m_acc_reply_frame_hdl,
7942 7938 &statuschange->ASC),
7943 7939 ddi_get8(mpt->m_acc_reply_frame_hdl,
7944 7940 &statuschange->ASCQ)));
7945 7941 break;
7946 7942
7947 7943 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7948 7944 NDBG20(("Device not supported"));
7949 7945 break;
7950 7946
7951 7947 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7952 7948 NDBG20(("IOC internally generated the Target Reset "
7953 7949 "for devhdl:%x", devhdl));
7954 7950 break;
7955 7951
7956 7952 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7957 7953 NDBG20(("IOC's internally generated Target Reset "
7958 7954 "completed for devhdl:%x", devhdl));
7959 7955 break;
7960 7956
7961 7957 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7962 7958 NDBG20(("IOC internally generated Abort Task"));
7963 7959 break;
7964 7960
7965 7961 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7966 7962 NDBG20(("IOC's internally generated Abort Task "
7967 7963 "completed"));
7968 7964 break;
7969 7965
7970 7966 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7971 7967 NDBG20(("IOC internally generated Abort Task Set"));
7972 7968 break;
7973 7969
7974 7970 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7975 7971 NDBG20(("IOC internally generated Clear Task Set"));
7976 7972 break;
7977 7973
7978 7974 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7979 7975 NDBG20(("IOC internally generated Query Task"));
7980 7976 break;
7981 7977
7982 7978 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7983 7979 NDBG20(("Device sent an Asynchronous Notification"));
7984 7980 break;
7985 7981
7986 7982 default:
7987 7983 break;
7988 7984 }
7989 7985 break;
7990 7986 }
7991 7987 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7992 7988 {
7993 7989 /*
7994 7990 * IR TOPOLOGY CHANGE LIST Event has already been handled
7995 7991 * in mpt_handle_event_sync() of interrupt context
7996 7992 */
7997 7993 break;
7998 7994 }
7999 7995 case MPI2_EVENT_IR_OPERATION_STATUS:
8000 7996 {
8001 7997 Mpi2EventDataIrOperationStatus_t *irOpStatus;
8002 7998 char reason_str[80];
8003 7999 uint8_t rc, percent;
8004 8000 uint16_t handle;
8005 8001
8006 8002 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
8007 8003 eventreply->EventData;
8008 8004 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
8009 8005 &irOpStatus->RAIDOperation);
8010 8006 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
8011 8007 &irOpStatus->PercentComplete);
8012 8008 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8013 8009 &irOpStatus->VolDevHandle);
8014 8010
8015 8011 switch (rc) {
8016 8012 case MPI2_EVENT_IR_RAIDOP_RESYNC:
8017 8013 (void) sprintf(reason_str, "resync");
8018 8014 break;
8019 8015 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8020 8016 (void) sprintf(reason_str, "online capacity "
8021 8017 "expansion");
8022 8018 break;
8023 8019 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8024 8020 (void) sprintf(reason_str, "consistency check");
8025 8021 break;
8026 8022 default:
8027 8023 (void) sprintf(reason_str, "unknown reason %x",
8028 8024 rc);
8029 8025 }
8030 8026
8031 8027 NDBG20(("mptsas%d raid operational status: (%s)"
8032 8028 "\thandle(0x%04x), percent complete(%d)\n",
8033 8029 mpt->m_instance, reason_str, handle, percent));
8034 8030 break;
8035 8031 }
8036 8032 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
8037 8033 {
8038 8034 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
8039 8035 uint8_t phy_num;
8040 8036 uint8_t primitive;
8041 8037
8042 8038 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
8043 8039 eventreply->EventData;
8044 8040
8045 8041 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
8046 8042 &sas_broadcast->PhyNum);
8047 8043 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
8048 8044 &sas_broadcast->Primitive);
8049 8045
8050 8046 switch (primitive) {
8051 8047 case MPI2_EVENT_PRIMITIVE_CHANGE:
8052 8048 mptsas_smhba_log_sysevent(mpt,
8053 8049 ESC_SAS_HBA_PORT_BROADCAST,
8054 8050 SAS_PORT_BROADCAST_CHANGE,
8055 8051 &mpt->m_phy_info[phy_num].smhba_info);
8056 8052 break;
8057 8053 case MPI2_EVENT_PRIMITIVE_SES:
8058 8054 mptsas_smhba_log_sysevent(mpt,
8059 8055 ESC_SAS_HBA_PORT_BROADCAST,
8060 8056 SAS_PORT_BROADCAST_SES,
8061 8057 &mpt->m_phy_info[phy_num].smhba_info);
8062 8058 break;
8063 8059 case MPI2_EVENT_PRIMITIVE_EXPANDER:
8064 8060 mptsas_smhba_log_sysevent(mpt,
8065 8061 ESC_SAS_HBA_PORT_BROADCAST,
8066 8062 SAS_PORT_BROADCAST_D01_4,
8067 8063 &mpt->m_phy_info[phy_num].smhba_info);
8068 8064 break;
8069 8065 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
8070 8066 mptsas_smhba_log_sysevent(mpt,
8071 8067 ESC_SAS_HBA_PORT_BROADCAST,
8072 8068 SAS_PORT_BROADCAST_D04_7,
8073 8069 &mpt->m_phy_info[phy_num].smhba_info);
8074 8070 break;
8075 8071 case MPI2_EVENT_PRIMITIVE_RESERVED3:
8076 8072 mptsas_smhba_log_sysevent(mpt,
8077 8073 ESC_SAS_HBA_PORT_BROADCAST,
8078 8074 SAS_PORT_BROADCAST_D16_7,
8079 8075 &mpt->m_phy_info[phy_num].smhba_info);
8080 8076 break;
8081 8077 case MPI2_EVENT_PRIMITIVE_RESERVED4:
8082 8078 mptsas_smhba_log_sysevent(mpt,
8083 8079 ESC_SAS_HBA_PORT_BROADCAST,
8084 8080 SAS_PORT_BROADCAST_D29_7,
8085 8081 &mpt->m_phy_info[phy_num].smhba_info);
8086 8082 break;
8087 8083 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
8088 8084 mptsas_smhba_log_sysevent(mpt,
8089 8085 ESC_SAS_HBA_PORT_BROADCAST,
8090 8086 SAS_PORT_BROADCAST_D24_0,
8091 8087 &mpt->m_phy_info[phy_num].smhba_info);
8092 8088 break;
8093 8089 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
8094 8090 mptsas_smhba_log_sysevent(mpt,
8095 8091 ESC_SAS_HBA_PORT_BROADCAST,
8096 8092 SAS_PORT_BROADCAST_D27_4,
8097 8093 &mpt->m_phy_info[phy_num].smhba_info);
8098 8094 break;
8099 8095 default:
8100 8096 NDBG16(("mptsas%d: unknown BROADCAST PRIMITIVE"
8101 8097 " %x received",
8102 8098 mpt->m_instance, primitive));
8103 8099 break;
8104 8100 }
8105 8101 NDBG16(("mptsas%d sas broadcast primitive: "
8106 8102 "\tprimitive(0x%04x), phy(%d) complete\n",
8107 8103 mpt->m_instance, primitive, phy_num));
8108 8104 break;
8109 8105 }
8110 8106 case MPI2_EVENT_IR_VOLUME:
8111 8107 {
8112 8108 Mpi2EventDataIrVolume_t *irVolume;
8113 8109 uint16_t devhandle;
8114 8110 uint32_t state;
8115 8111 int config, vol;
8116 8112 uint8_t found = FALSE;
8117 8113
8118 8114 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
8119 8115 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
8120 8116 &irVolume->NewValue);
8121 8117 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8122 8118 &irVolume->VolDevHandle);
8123 8119
8124 8120 NDBG20(("EVENT_IR_VOLUME event is received"));
8125 8121
8126 8122 /*
8127 8123 * Get latest RAID info and then find the DevHandle for this
8128 8124 * event in the configuration. If the DevHandle is not found
8129 8125 * just exit the event.
8130 8126 */
8131 8127 (void) mptsas_get_raid_info(mpt);
8132 8128 for (config = 0; (config < mpt->m_num_raid_configs) &&
8133 8129 (!found); config++) {
8134 8130 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
8135 8131 if (mpt->m_raidconfig[config].m_raidvol[vol].
8136 8132 m_raidhandle == devhandle) {
8137 8133 found = TRUE;
8138 8134 break;
8139 8135 }
8140 8136 }
8141 8137 }
8142 8138 if (!found) {
8143 8139 break;
8144 8140 }
8145 8141
8146 8142 switch (irVolume->ReasonCode) {
8147 8143 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
8148 8144 {
8149 8145 uint32_t i;
8150 8146 mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
8151 8147 state;
8152 8148
8153 8149 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
8154 8150 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
8155 8151 ", auto-config of hot-swap drives is %s"
8156 8152 ", write caching is %s"
8157 8153 ", hot-spare pool mask is %02x\n",
8158 8154 vol, state &
8159 8155 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
8160 8156 ? "disabled" : "enabled",
8161 8157 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
8162 8158 ? "controlled by member disks" :
8163 8159 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
8164 8160 ? "disabled" :
8165 8161 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
8166 8162 ? "enabled" :
8167 8163 "incorrectly set",
8168 8164 (state >> 16) & 0xff);
8169 8165 break;
8170 8166 }
8171 8167 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
8172 8168 {
8173 8169 mpt->m_raidconfig[config].m_raidvol[vol].m_state =
8174 8170 (uint8_t)state;
8175 8171
8176 8172 mptsas_log(mpt, CE_NOTE,
8177 8173 "Volume %d is now %s\n", vol,
8178 8174 state == MPI2_RAID_VOL_STATE_OPTIMAL
8179 8175 ? "optimal" :
8180 8176 state == MPI2_RAID_VOL_STATE_DEGRADED
8181 8177 ? "degraded" :
8182 8178 state == MPI2_RAID_VOL_STATE_ONLINE
8183 8179 ? "online" :
8184 8180 state == MPI2_RAID_VOL_STATE_INITIALIZING
8185 8181 ? "initializing" :
8186 8182 state == MPI2_RAID_VOL_STATE_FAILED
8187 8183 ? "failed" :
8188 8184 state == MPI2_RAID_VOL_STATE_MISSING
8189 8185 ? "missing" :
8190 8186 "state unknown");
8191 8187 break;
8192 8188 }
8193 8189 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
8194 8190 {
8195 8191 mpt->m_raidconfig[config].m_raidvol[vol].
8196 8192 m_statusflags = state;
8197 8193
8198 8194 mptsas_log(mpt, CE_NOTE,
8199 8195 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
8200 8196 vol,
8201 8197 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
8202 8198 ? ", enabled" : ", disabled",
8203 8199 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
8204 8200 ? ", quiesced" : "",
8205 8201 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
8206 8202 ? ", inactive" : ", active",
8207 8203 state &
8208 8204 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
8209 8205 ? ", bad block table is full" : "",
8210 8206 state &
8211 8207 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
8212 8208 ? ", resync in progress" : "",
8213 8209 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
8214 8210 ? ", background initialization in progress" : "",
8215 8211 state &
8216 8212 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
8217 8213 ? ", capacity expansion in progress" : "",
8218 8214 state &
8219 8215 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
8220 8216 ? ", consistency check in progress" : "",
8221 8217 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
8222 8218 ? ", data scrub in progress" : "");
8223 8219 break;
8224 8220 }
8225 8221 default:
8226 8222 break;
8227 8223 }
8228 8224 break;
8229 8225 }
8230 8226 case MPI2_EVENT_IR_PHYSICAL_DISK:
8231 8227 {
8232 8228 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
8233 8229 uint16_t devhandle, enchandle, slot;
8234 8230 uint32_t status, state;
8235 8231 uint8_t physdisknum, reason;
8236 8232
8237 8233 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
8238 8234 eventreply->EventData;
8239 8235 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
8240 8236 &irPhysDisk->PhysDiskNum);
8241 8237 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8242 8238 &irPhysDisk->PhysDiskDevHandle);
8243 8239 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8244 8240 &irPhysDisk->EnclosureHandle);
8245 8241 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
8246 8242 &irPhysDisk->Slot);
8247 8243 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
8248 8244 &irPhysDisk->NewValue);
8249 8245 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8250 8246 &irPhysDisk->ReasonCode);
8251 8247
8252 8248 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
8253 8249
8254 8250 switch (reason) {
8255 8251 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
8256 8252 mptsas_log(mpt, CE_NOTE,
8257 8253 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8258 8254 "for enclosure with handle 0x%x is now in hot "
8259 8255 "spare pool %d",
8260 8256 physdisknum, devhandle, slot, enchandle,
8261 8257 (state >> 16) & 0xff);
8262 8258 break;
8263 8259
8264 8260 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
8265 8261 status = state;
8266 8262 mptsas_log(mpt, CE_NOTE,
8267 8263 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8268 8264 "for enclosure with handle 0x%x is now "
8269 8265 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
8270 8266 enchandle,
8271 8267 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
8272 8268 ? ", inactive" : ", active",
8273 8269 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
8274 8270 ? ", out of sync" : "",
8275 8271 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
8276 8272 ? ", quiesced" : "",
8277 8273 status &
8278 8274 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
8279 8275 ? ", write cache enabled" : "",
8280 8276 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
8281 8277 ? ", capacity expansion target" : "");
8282 8278 break;
8283 8279
8284 8280 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
8285 8281 mptsas_log(mpt, CE_NOTE,
8286 8282 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8287 8283 "for enclosure with handle 0x%x is now %s\n",
8288 8284 physdisknum, devhandle, slot, enchandle,
8289 8285 state == MPI2_RAID_PD_STATE_OPTIMAL
8290 8286 ? "optimal" :
8291 8287 state == MPI2_RAID_PD_STATE_REBUILDING
8292 8288 ? "rebuilding" :
8293 8289 state == MPI2_RAID_PD_STATE_DEGRADED
8294 8290 ? "degraded" :
8295 8291 state == MPI2_RAID_PD_STATE_HOT_SPARE
8296 8292 ? "a hot spare" :
8297 8293 state == MPI2_RAID_PD_STATE_ONLINE
8298 8294 ? "online" :
8299 8295 state == MPI2_RAID_PD_STATE_OFFLINE
8300 8296 ? "offline" :
8301 8297 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
8302 8298 ? "not compatible" :
8303 8299 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
8304 8300 ? "not configured" :
8305 8301 "state unknown");
8306 8302 break;
8307 8303 }
8308 8304 break;
8309 8305 }
8310 8306 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
8311 8307 {
8312 8308 pMpi26EventDataActiveCableExcept_t actcable;
8313 8309 uint32_t power;
8314 8310 uint8_t reason, id;
8315 8311
8316 8312 actcable = (pMpi26EventDataActiveCableExcept_t)
8317 8313 eventreply->EventData;
8318 8314 power = ddi_get32(mpt->m_acc_reply_frame_hdl,
8319 8315 &actcable->ActiveCablePowerRequirement);
8320 8316 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8321 8317 &actcable->ReasonCode);
8322 8318 id = ddi_get8(mpt->m_acc_reply_frame_hdl,
8323 8319 &actcable->ReceptacleID);
8324 8320
8325 8321 /*
8326 8322 * It'd be nice if this weren't just logging to the system but
8327 8323 * were telling FMA about the active cable problem and FMA was
8328 8324 * aware of the cable topology and state.
8329 8325 */
8330 8326 switch (reason) {
8331 8327 case MPI26_EVENT_ACTIVE_CABLE_PRESENT:
8332 8328 /* Don't log anything if it's fine */
8333 8329 break;
8334 8330 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
8335 8331 mptsas_log(mpt, CE_WARN, "An active cable (id %u) does "
8336 8332 "not have sufficient power to be enabled. "
8337 8333 "Devices connected to this cable will not be "
8338 8334 "visible to the system.", id);
8339 8335 if (power == UINT32_MAX) {
8340 8336 mptsas_log(mpt, CE_CONT, "The cable's power "
8341 8337 "requirements are unknown.\n");
8342 8338 } else {
8343 8339 mptsas_log(mpt, CE_CONT, "The cable requires "
8344 8340 "%u mW of power to function.\n", power);
8345 8341 }
8346 8342 break;
8347 8343 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
8348 8344 mptsas_log(mpt, CE_WARN, "An active cable (id %u) is "
8349 8345 "degraded and not running at its full speed. "
8350 8346 "Some devices might not appear.", id);
8351 8347 break;
8352 8348 default:
8353 8349 break;
8354 8350 }
8355 8351 break;
8356 8352 }
8357 8353 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
8358 8354 case MPI2_EVENT_PCIE_ENUMERATION:
8359 8355 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
8360 8356 case MPI2_EVENT_PCIE_LINK_COUNTER:
8361 8357 mptsas_log(mpt, CE_NOTE, "Unhandled mpt_sas PCIe device "
8362 8358 "event received (0x%x)", event);
8363 8359 break;
8364 8360 default:
8365 8361 NDBG20(("mptsas%d: unknown event %x received",
8366 8362 mpt->m_instance, event));
8367 8363 break;
8368 8364 }
8369 8365
8370 8366 /*
8371 8367 * Return the reply frame to the free queue.
8372 8368 */
8373 8369 ddi_put32(mpt->m_acc_free_queue_hdl,
8374 8370 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
8375 8371 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
8376 8372 DDI_DMA_SYNC_FORDEV);
8377 8373 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
8378 8374 mpt->m_free_index = 0;
8379 8375 }
8380 8376 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
8381 8377 mpt->m_free_index);
8382 8378 mutex_exit(&mpt->m_mutex);
8383 8379 }
8384 8380
8385 8381 /*
8386 8382 * invoked from timeout() to restart qfull cmds with throttle == 0
8387 8383 */
8388 8384 static void
8389 8385 mptsas_restart_cmd(void *arg)
8390 8386 {
8391 8387 mptsas_t *mpt = arg;
8392 8388 mptsas_target_t *ptgt = NULL;
8393 8389
8394 8390 mutex_enter(&mpt->m_mutex);
8395 8391
8396 8392 mpt->m_restart_cmd_timeid = 0;
8397 8393
8398 8394 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8399 8395 ptgt = refhash_next(mpt->m_targets, ptgt)) {
8400 8396 if (ptgt->m_reset_delay == 0) {
8401 8397 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
8402 8398 mptsas_set_throttle(mpt, ptgt,
8403 8399 MAX_THROTTLE);
8404 8400 }
8405 8401 }
8406 8402 }
8407 8403 mptsas_restart_hba(mpt);
8408 8404 mutex_exit(&mpt->m_mutex);
8409 8405 }
8410 8406
8411 8407 void
8412 8408 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8413 8409 {
8414 8410 int slot;
8415 8411 mptsas_slots_t *slots = mpt->m_active;
8416 8412 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8417 8413
8418 8414 ASSERT(cmd != NULL);
8419 8415 ASSERT(cmd->cmd_queued == FALSE);
8420 8416
8421 8417 /*
8422 8418 * Task Management cmds are removed in their own routines. Also,
8423 8419 * we don't want to modify timeout based on TM cmds.
8424 8420 */
8425 8421 if (cmd->cmd_flags & CFLAG_TM_CMD) {
8426 8422 return;
8427 8423 }
8428 8424
8429 8425 slot = cmd->cmd_slot;
8430 8426
8431 8427 /*
8432 8428 * remove the cmd.
8433 8429 */
8434 8430 if (cmd == slots->m_slot[slot]) {
8435 8431 NDBG31(("mptsas_remove_cmd: removing cmd=0x%p, flags "
8436 8432 "0x%x", (void *)cmd, cmd->cmd_flags));
8437 8433 slots->m_slot[slot] = NULL;
8438 8434 mpt->m_ncmds--;
8439 8435
8440 8436 /*
8441 8437 * only decrement per target ncmds if command
8442 8438 * has a target associated with it.
8443 8439 */
8444 8440 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
8445 8441 ptgt->m_t_ncmds--;
8446 8442 /*
8447 8443 * reset throttle if we just ran an untagged command
8448 8444 * to a tagged target
8449 8445 */
8450 8446 if ((ptgt->m_t_ncmds == 0) &&
8451 8447 ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
8452 8448 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8453 8449 }
8454 8450
8455 8451 /*
8456 8452 * Remove this command from the active queue.
8457 8453 */
8458 8454 if (cmd->cmd_active_expiration != 0) {
8459 8455 TAILQ_REMOVE(&ptgt->m_active_cmdq, cmd,
8460 8456 cmd_active_link);
8461 8457 cmd->cmd_active_expiration = 0;
8462 8458 }
8463 8459 }
8464 8460 }
8465 8461
8466 8462 /*
8467 8463 * This is all we need to do for ioc commands.
8468 8464 */
8469 8465 if (cmd->cmd_flags & CFLAG_CMDIOC) {
8470 8466 mptsas_return_to_pool(mpt, cmd);
8471 8467 return;
8472 8468 }
8473 8469
8474 8470 ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
8475 8471 }
8476 8472
8477 8473 /*
8478 8474 * accept all cmds on the tx_waitq if any and then
8479 8475 * start a fresh request from the top of the device queue.
8480 8476 *
8481 8477 * since there are always cmds queued on the tx_waitq, and rare cmds on
8482 8478 * the instance waitq, so this function should not be invoked in the ISR,
8483 8479 * the mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
8484 8480 * burden belongs to the IO dispatch CPUs is moved the interrupt CPU.
8485 8481 */
8486 8482 static void
8487 8483 mptsas_restart_hba(mptsas_t *mpt)
8488 8484 {
8489 8485 ASSERT(mutex_owned(&mpt->m_mutex));
8490 8486
8491 8487 mutex_enter(&mpt->m_tx_waitq_mutex);
8492 8488 if (mpt->m_tx_waitq) {
8493 8489 mptsas_accept_tx_waitq(mpt);
8494 8490 }
8495 8491 mutex_exit(&mpt->m_tx_waitq_mutex);
8496 8492 mptsas_restart_waitq(mpt);
8497 8493 }
8498 8494
/*
 * Start fresh requests from the top of the device (instance) queue.
 *
 * Caller must hold m_mutex.  Walks the waitq and issues every command
 * that currently has slot and throttle budget; commands that cannot be
 * started yet are simply left on the queue.  Internal command types
 * (passthru, config-page, FW diag) bypass the per-target throttle.
 */
static void
mptsas_restart_waitq(mptsas_t *mpt)
{
	mptsas_cmd_t	*cmd, *next_cmd;
	mptsas_target_t *ptgt = NULL;

	NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * If there is a reset delay, don't start any cmds. Otherwise, start
	 * as many cmds as possible.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	cmd = mpt->m_waitq;

	while (cmd != NULL) {
		/* Save the link now; starting a cmd unlinks it. */
		next_cmd = cmd->cmd_linkp;
		if (cmd->cmd_flags & CFLAG_PASSTHRU) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * A passthru command that got a slot must
				 * be marked CFLAG_PREPARED.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_passthru(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}
		if (cmd->cmd_flags & CFLAG_CONFIG) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * Send the config page request and delete it
				 * from the waitq.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_config_page_access(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}
		if (cmd->cmd_flags & CFLAG_FW_DIAG) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				/*
				 * Send the FW Diag request and delete it from
				 * the waitq.
				 */
				cmd->cmd_flags |= CFLAG_PREPARED;
				mptsas_waitq_delete(mpt, cmd);
				mptsas_start_diag(mpt, cmd);
			}
			cmd = next_cmd;
			continue;
		}

		ptgt = cmd->cmd_tgt_addr;
		/*
		 * A target draining after an untagged command becomes
		 * eligible again once it has no outstanding commands.
		 */
		if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
		    (ptgt->m_t_ncmds == 0)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		}
		/*
		 * Start the command only when the instance has a free
		 * slot, the target is out of reset delay, and the target
		 * throttle has room.
		 */
		if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
		    (ptgt && (ptgt->m_reset_delay == 0)) &&
		    (ptgt && (ptgt->m_t_ncmds <
		    ptgt->m_t_throttle))) {
			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
				mptsas_waitq_delete(mpt, cmd);
				(void) mptsas_start_cmd(mpt, cmd);
			}
		}
		cmd = next_cmd;
	}
}
8579 8575 /*
8580 8576 * Cmds are queued if tran_start() doesn't get the m_mutexlock(no wait).
8581 8577 * Accept all those queued cmds before new cmd is accept so that the
8582 8578 * cmds are sent in order.
8583 8579 */
8584 8580 static void
8585 8581 mptsas_accept_tx_waitq(mptsas_t *mpt)
8586 8582 {
8587 8583 mptsas_cmd_t *cmd;
8588 8584
8589 8585 ASSERT(mutex_owned(&mpt->m_mutex));
8590 8586 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
8591 8587
8592 8588 /*
8593 8589 * A Bus Reset could occur at any time and flush the tx_waitq,
8594 8590 * so we cannot count on the tx_waitq to contain even one cmd.
8595 8591 * And when the m_tx_waitq_mutex is released and run
8596 8592 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8597 8593 */
8598 8594 cmd = mpt->m_tx_waitq;
8599 8595 for (;;) {
8600 8596 if ((cmd = mpt->m_tx_waitq) == NULL) {
8601 8597 mpt->m_tx_draining = 0;
8602 8598 break;
8603 8599 }
8604 8600 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8605 8601 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8606 8602 }
8607 8603 cmd->cmd_linkp = NULL;
8608 8604 mutex_exit(&mpt->m_tx_waitq_mutex);
8609 8605 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8610 8606 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8611 8607 "to accept cmd on queue\n");
8612 8608 mutex_enter(&mpt->m_tx_waitq_mutex);
8613 8609 }
8614 8610 }
8615 8611
8616 8612
/*
 * mpt tag type lookup
 *
 * Maps the scsi_pkt tag value (the pkt_flags FLAG_TAGMASK field, shifted
 * down by 12 in mptsas_start_cmd()) to the corresponding SCSI queue-tag
 * message code.  Indices 0 and 3 are unused encodings.
 */
static char mptsas_tag_lookup[] =
	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8622 8618
/*
 * Build and post an MPI2 SCSI IO request for a command that already owns
 * a slot (cmd_slot is the SMID).  Fills in the request frame, sense
 * (ARQ) buffer address, SGL, and tag-queueing control bits, writes the
 * request descriptor to start the IO, and inserts the command into the
 * target's expiration-sorted active queue for timeout handling.
 *
 * Returns DDI_SUCCESS when the request was posted; DDI_FAILURE when the
 * command was re-queued to drain untagged IO, or when an FM handle check
 * failed after posting.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	caddr_t			mem, arsbuf;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint8_t			ars_size;
	uint64_t		request_desc;
	uint32_t		ars_dmaaddrlow;
	mptsas_cmd_t		*c;

	NDBG1(("mptsas_start_cmd: cmd=0x%p, flags 0x%x", (void *)cmd,
	    cmd->cmd_flags));

	/*
	 * The slot (SMID) was already reserved for this command by
	 * mptsas_save_cmd().  SMID 0 is invalid, so slot numbering
	 * starts at 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired. That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/* Give the slot back; retry from the waitq head. */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		/*
		 * Untagged, non-REQUEST SENSE command: throttle the
		 * target down to one outstanding command.
		 */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	/* Locate this SMID's request frame within the frame pool. */
	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;
	if (cmd->cmd_extrqslen != 0) {
		/*
		 * Mapping of the buffer was done in mptsas_pkt_alloc_extern().
		 * Calculate the DMA address with the same offset.
		 */
		arsbuf = cmd->cmd_arq_buf;
		ars_size = cmd->cmd_extrqslen;
		ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
		    ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
		    0xffffffffu;
	} else {
		/* Default per-SMID sense buffer from the sense pool. */
		arsbuf = mpt->m_req_sense + (mpt->m_req_sense_size * (SMID-1));
		cmd->cmd_arq_buf = arsbuf;
		ars_size = mpt->m_req_sense_size;
		ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
		    (mpt->m_req_sense_size * (SMID-1))) &
		    0xffffffffu;
	}
	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	bzero(arsbuf, ars_size);

	/* SGLOffset0 is expressed in 4-byte (DWORD) units. */
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	/* Copy the CDB into the request frame. */
	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	if (mptsas_use_fastpath &&
	    ptgt->m_io_flags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
		io_flags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		request_desc = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	} else {
		request_desc = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	}
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* No data transfer: emit a single empty simple element. */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, ars_size);
	ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress, ars_dmaaddrlow);

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=%d(0x%p), with cmd=0x%p",
	    SMID, (void *)io_request, (void *)cmd));

	/* Flush the request frame and sense buffer before posting. */
	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 */
	request_desc |= (SMID << 16);
	request_desc |= (uint64_t)ptgt->m_devhdl << 48;
	MPTSAS_START_CMD(mpt, request_desc);

	/*
	 * Start timeout.
	 */
	cmd->cmd_active_expiration =
	    gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
#ifdef MPTSAS_TEST
	/*
	 * Force timeouts to happen immediately.
	 */
	if (mptsas_test_timeouts)
		cmd->cmd_active_expiration = gethrtime();
#endif
	/*
	 * Keep m_active_cmdq sorted so that the soonest-to-expire command
	 * is at the tail; head holds the latest expiration.
	 */
	c = TAILQ_FIRST(&ptgt->m_active_cmdq);
	if (c == NULL ||
	    c->cmd_active_expiration < cmd->cmd_active_expiration) {
		/*
		 * Common case is that this is the last pending expiration
		 * (or queue is empty). Insert at head of the queue.
		 */
		TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
	} else {
		/*
		 * Queue is not empty and first element expires later than
		 * this command. Search for element expiring sooner.
		 */
		while ((c = TAILQ_NEXT(c, cmd_active_link)) != NULL) {
			if (c->cmd_active_expiration <
			    cmd->cmd_active_expiration) {
				TAILQ_INSERT_BEFORE(c, cmd, cmd_active_link);
				break;
			}
		}
		if (c == NULL) {
			/*
			 * No element found expiring sooner, append to
			 * non-empty queue.
			 */
			TAILQ_INSERT_TAIL(&ptgt->m_active_cmdq, cmd,
			    cmd_active_link);
		}
	}

	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
8835 8831
8836 8832 /*
8837 8833 * Select a helper thread to handle current doneq
8838 8834 */
8839 8835 static void
8840 8836 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8841 8837 {
8842 8838 uint64_t t, i;
8843 8839 uint32_t min = 0xffffffff;
8844 8840 mptsas_doneq_thread_list_t *item;
8845 8841
8846 8842 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8847 8843 item = &mpt->m_doneq_thread_id[i];
8848 8844 /*
8849 8845 * If the completed command on help thread[i] less than
8850 8846 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8851 8847 * pick a thread which has least completed command.
8852 8848 */
8853 8849
8854 8850 mutex_enter(&item->mutex);
8855 8851 if (item->len < mpt->m_doneq_thread_threshold) {
8856 8852 t = i;
8857 8853 mutex_exit(&item->mutex);
8858 8854 break;
8859 8855 }
8860 8856 if (item->len < min) {
8861 8857 min = item->len;
8862 8858 t = i;
8863 8859 }
8864 8860 mutex_exit(&item->mutex);
8865 8861 }
8866 8862 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8867 8863 mptsas_doneq_mv(mpt, t);
8868 8864 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8869 8865 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8870 8866 }
8871 8867
8872 8868 /*
8873 8869 * move the current global doneq to the doneq of thead[t]
8874 8870 */
8875 8871 static void
8876 8872 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8877 8873 {
8878 8874 mptsas_cmd_t *cmd;
8879 8875 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8880 8876
8881 8877 ASSERT(mutex_owned(&item->mutex));
8882 8878 while ((cmd = mpt->m_doneq) != NULL) {
8883 8879 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8884 8880 mpt->m_donetail = &mpt->m_doneq;
8885 8881 }
8886 8882 cmd->cmd_linkp = NULL;
8887 8883 *item->donetail = cmd;
8888 8884 item->donetail = &cmd->cmd_linkp;
8889 8885 mpt->m_doneq_len--;
8890 8886 item->len++;
8891 8887 }
8892 8888 }
8893 8889
8894 8890 void
8895 8891 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8896 8892 {
8897 8893 struct scsi_pkt *pkt = CMD2PKT(cmd);
8898 8894
8899 8895 /* Check all acc and dma handles */
8900 8896 if ((mptsas_check_acc_handle(mpt->m_datap) !=
8901 8897 DDI_SUCCESS) ||
8902 8898 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8903 8899 DDI_SUCCESS) ||
8904 8900 (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
8905 8901 DDI_SUCCESS) ||
8906 8902 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8907 8903 DDI_SUCCESS) ||
8908 8904 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8909 8905 DDI_SUCCESS) ||
8910 8906 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8911 8907 DDI_SUCCESS) ||
8912 8908 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8913 8909 DDI_SUCCESS) ||
8914 8910 (mptsas_check_acc_handle(mpt->m_config_handle) !=
8915 8911 DDI_SUCCESS)) {
8916 8912 ddi_fm_service_impact(mpt->m_dip,
8917 8913 DDI_SERVICE_UNAFFECTED);
8918 8914 ddi_fm_acc_err_clear(mpt->m_config_handle,
8919 8915 DDI_FME_VER0);
8920 8916 pkt->pkt_reason = CMD_TRAN_ERR;
8921 8917 pkt->pkt_statistics = 0;
8922 8918 }
8923 8919 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
8924 8920 DDI_SUCCESS) ||
8925 8921 (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
8926 8922 DDI_SUCCESS) ||
8927 8923 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
8928 8924 DDI_SUCCESS) ||
8929 8925 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
8930 8926 DDI_SUCCESS) ||
8931 8927 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
8932 8928 DDI_SUCCESS) ||
8933 8929 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
8934 8930 DDI_SUCCESS)) {
8935 8931 ddi_fm_service_impact(mpt->m_dip,
8936 8932 DDI_SERVICE_UNAFFECTED);
8937 8933 pkt->pkt_reason = CMD_TRAN_ERR;
8938 8934 pkt->pkt_statistics = 0;
8939 8935 }
8940 8936 if (cmd->cmd_dmahandle &&
8941 8937 (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
8942 8938 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8943 8939 pkt->pkt_reason = CMD_TRAN_ERR;
8944 8940 pkt->pkt_statistics = 0;
8945 8941 }
8946 8942 if ((cmd->cmd_extra_frames &&
8947 8943 ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
8948 8944 DDI_SUCCESS) ||
8949 8945 (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
8950 8946 DDI_SUCCESS)))) {
8951 8947 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8952 8948 pkt->pkt_reason = CMD_TRAN_ERR;
8953 8949 pkt->pkt_statistics = 0;
8954 8950 }
8955 8951 }
8956 8952
8957 8953 /*
8958 8954 * These routines manipulate the queue of commands that
8959 8955 * are waiting for their completion routines to be called.
8960 8956 * The queue is usually in FIFO order but on an MP system
8961 8957 * it's possible for the completion routines to get out
8962 8958 * of order. If that's a problem you need to add a global
8963 8959 * mutex around the code that calls the completion routine
8964 8960 * in the interrupt handler.
8965 8961 */
8966 8962 static void
8967 8963 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8968 8964 {
8969 8965 struct scsi_pkt *pkt = CMD2PKT(cmd);
8970 8966
8971 8967 NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8972 8968
8973 8969 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8974 8970 cmd->cmd_linkp = NULL;
8975 8971 cmd->cmd_flags |= CFLAG_FINISHED;
8976 8972 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8977 8973
8978 8974 mptsas_fma_check(mpt, cmd);
8979 8975
8980 8976 /*
8981 8977 * only add scsi pkts that have completion routines to
8982 8978 * the doneq. no intr cmds do not have callbacks.
8983 8979 */
8984 8980 if (pkt && (pkt->pkt_comp)) {
8985 8981 *mpt->m_donetail = cmd;
8986 8982 mpt->m_donetail = &cmd->cmd_linkp;
8987 8983 mpt->m_doneq_len++;
8988 8984 }
8989 8985 }
8990 8986
8991 8987 static mptsas_cmd_t *
8992 8988 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8993 8989 {
8994 8990 mptsas_cmd_t *cmd;
8995 8991 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8996 8992
8997 8993 /* pop one off the done queue */
8998 8994 if ((cmd = item->doneq) != NULL) {
8999 8995 /* if the queue is now empty fix the tail pointer */
9000 8996 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
9001 8997 if ((item->doneq = cmd->cmd_linkp) == NULL) {
9002 8998 item->donetail = &item->doneq;
9003 8999 }
9004 9000 cmd->cmd_linkp = NULL;
9005 9001 item->len--;
9006 9002 }
9007 9003 return (cmd);
9008 9004 }
9009 9005
9010 9006 static void
9011 9007 mptsas_doneq_empty(mptsas_t *mpt)
9012 9008 {
9013 9009 if (mpt->m_doneq && !mpt->m_in_callback) {
9014 9010 mptsas_cmd_t *cmd, *next;
9015 9011 struct scsi_pkt *pkt;
9016 9012
9017 9013 mpt->m_in_callback = 1;
9018 9014 cmd = mpt->m_doneq;
9019 9015 mpt->m_doneq = NULL;
9020 9016 mpt->m_donetail = &mpt->m_doneq;
9021 9017 mpt->m_doneq_len = 0;
9022 9018
9023 9019 mutex_exit(&mpt->m_mutex);
9024 9020 /*
9025 9021 * run the completion routines of all the
9026 9022 * completed commands
9027 9023 */
9028 9024 while (cmd != NULL) {
9029 9025 next = cmd->cmd_linkp;
9030 9026 cmd->cmd_linkp = NULL;
9031 9027 /* run this command's completion routine */
9032 9028 cmd->cmd_flags |= CFLAG_COMPLETED;
9033 9029 pkt = CMD2PKT(cmd);
9034 9030 mptsas_pkt_comp(pkt, cmd);
9035 9031 cmd = next;
9036 9032 }
9037 9033 mutex_enter(&mpt->m_mutex);
9038 9034 mpt->m_in_callback = 0;
9039 9035 }
9040 9036 }
9041 9037
9042 9038 /*
9043 9039 * These routines manipulate the target's queue of pending requests
9044 9040 */
9045 9041 void
9046 9042 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
9047 9043 {
9048 9044 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
9049 9045 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
9050 9046 cmd->cmd_queued = TRUE;
9051 9047 if (ptgt)
9052 9048 ptgt->m_t_nwait++;
9053 9049 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
9054 9050 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
9055 9051 mpt->m_waitqtail = &cmd->cmd_linkp;
9056 9052 }
9057 9053 mpt->m_waitq = cmd;
9058 9054 } else {
9059 9055 cmd->cmd_linkp = NULL;
9060 9056 *(mpt->m_waitqtail) = cmd;
9061 9057 mpt->m_waitqtail = &cmd->cmd_linkp;
9062 9058 }
9063 9059 }
9064 9060
9065 9061 static mptsas_cmd_t *
9066 9062 mptsas_waitq_rm(mptsas_t *mpt)
9067 9063 {
9068 9064 mptsas_cmd_t *cmd;
9069 9065 mptsas_target_t *ptgt;
9070 9066 NDBG7(("mptsas_waitq_rm"));
9071 9067
9072 9068 MPTSAS_WAITQ_RM(mpt, cmd);
9073 9069
9074 9070 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
9075 9071 if (cmd) {
9076 9072 ptgt = cmd->cmd_tgt_addr;
9077 9073 if (ptgt) {
9078 9074 ptgt->m_t_nwait--;
9079 9075 ASSERT(ptgt->m_t_nwait >= 0);
9080 9076 }
9081 9077 }
9082 9078 return (cmd);
9083 9079 }
9084 9080
9085 9081 /*
9086 9082 * remove specified cmd from the middle of the wait queue.
9087 9083 */
9088 9084 static void
9089 9085 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
9090 9086 {
9091 9087 mptsas_cmd_t *prevp = mpt->m_waitq;
9092 9088 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
9093 9089
9094 9090 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9095 9091 (void *)mpt, (void *)cmd));
9096 9092 if (ptgt) {
9097 9093 ptgt->m_t_nwait--;
9098 9094 ASSERT(ptgt->m_t_nwait >= 0);
9099 9095 }
9100 9096
9101 9097 if (prevp == cmd) {
9102 9098 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
9103 9099 mpt->m_waitqtail = &mpt->m_waitq;
9104 9100
9105 9101 cmd->cmd_linkp = NULL;
9106 9102 cmd->cmd_queued = FALSE;
9107 9103 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9108 9104 (void *)mpt, (void *)cmd));
9109 9105 return;
9110 9106 }
9111 9107
9112 9108 while (prevp != NULL) {
9113 9109 if (prevp->cmd_linkp == cmd) {
9114 9110 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
9115 9111 mpt->m_waitqtail = &prevp->cmd_linkp;
9116 9112
9117 9113 cmd->cmd_linkp = NULL;
9118 9114 cmd->cmd_queued = FALSE;
9119 9115 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9120 9116 (void *)mpt, (void *)cmd));
9121 9117 return;
9122 9118 }
9123 9119 prevp = prevp->cmd_linkp;
9124 9120 }
9125 9121 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
9126 9122 }
9127 9123
9128 9124 static mptsas_cmd_t *
9129 9125 mptsas_tx_waitq_rm(mptsas_t *mpt)
9130 9126 {
9131 9127 mptsas_cmd_t *cmd;
9132 9128 NDBG7(("mptsas_tx_waitq_rm"));
9133 9129
9134 9130 MPTSAS_TX_WAITQ_RM(mpt, cmd);
9135 9131
9136 9132 NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
9137 9133
9138 9134 return (cmd);
9139 9135 }
9140 9136
9141 9137 /*
9142 9138 * remove specified cmd from the middle of the tx_waitq.
9143 9139 */
9144 9140 static void
9145 9141 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
9146 9142 {
9147 9143 mptsas_cmd_t *prevp = mpt->m_tx_waitq;
9148 9144
9149 9145 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9150 9146 (void *)mpt, (void *)cmd));
9151 9147
9152 9148 if (prevp == cmd) {
9153 9149 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
9154 9150 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
9155 9151
9156 9152 cmd->cmd_linkp = NULL;
9157 9153 cmd->cmd_queued = FALSE;
9158 9154 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9159 9155 (void *)mpt, (void *)cmd));
9160 9156 return;
9161 9157 }
9162 9158
9163 9159 while (prevp != NULL) {
9164 9160 if (prevp->cmd_linkp == cmd) {
9165 9161 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
9166 9162 mpt->m_tx_waitqtail = &prevp->cmd_linkp;
9167 9163
9168 9164 cmd->cmd_linkp = NULL;
9169 9165 cmd->cmd_queued = FALSE;
9170 9166 NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9171 9167 (void *)mpt, (void *)cmd));
9172 9168 return;
9173 9169 }
9174 9170 prevp = prevp->cmd_linkp;
9175 9171 }
9176 9172 cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
9177 9173 }
9178 9174
/*
 * device and bus reset handling
 *
 * Notes:
 *	- RESET_ALL: reset the controller
 *	- RESET_TARGET: reset the target specified in scsi_address
 */
static int
mptsas_scsi_reset(struct scsi_address *ap, int level)
{
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	mptsas_target_t		*ptgt = NULL;

	/* resolve the target behind this scsi_address */
	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
	ptgt = tgt_private->t_private;
	if (ptgt == NULL) {
		return (FALSE);
	}
	NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
	    level));

	mutex_enter(&mpt->m_mutex);
	/*
	 * if we are not in panic set up a reset delay for this target
	 */
	if (!ddi_in_panic()) {
		mptsas_setup_bus_reset_delay(mpt);
	} else {
		/* no timeout(9F) at panic time; busy-wait instead */
		drv_usecwait(mpt->m_scsi_reset_delay * 1000);
	}
	rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
	mutex_exit(&mpt->m_mutex);

	/*
	 * The transport layer expect to only see TRUE and
	 * FALSE. Therefore, we will adjust the return value
	 * if mptsas_do_scsi_reset returns FAILED.
	 */
	if (rval == FAILED)
		rval = FALSE;
	return (rval);
}
9223 9219
9224 9220 static int
9225 9221 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
9226 9222 {
9227 9223 int rval = FALSE;
9228 9224 uint8_t config, disk;
9229 9225
9230 9226 ASSERT(mutex_owned(&mpt->m_mutex));
9231 9227
9232 9228 if (mptsas_debug_resets) {
9233 9229 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
9234 9230 devhdl);
9235 9231 }
9236 9232
9237 9233 /*
9238 9234 * Issue a Target Reset message to the target specified but not to a
9239 9235 * disk making up a raid volume. Just look through the RAID config
9240 9236 * Phys Disk list of DevHandles. If the target's DevHandle is in this
9241 9237 * list, then don't reset this target.
9242 9238 */
9243 9239 for (config = 0; config < mpt->m_num_raid_configs; config++) {
9244 9240 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
9245 9241 if (devhdl == mpt->m_raidconfig[config].
9246 9242 m_physdisk_devhdl[disk]) {
9247 9243 return (TRUE);
9248 9244 }
9249 9245 }
9250 9246 }
9251 9247
9252 9248 rval = mptsas_ioc_task_management(mpt,
9253 9249 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
9254 9250
9255 9251 mptsas_doneq_empty(mpt);
9256 9252 return (rval);
9257 9253 }
9258 9254
9259 9255 static int
9260 9256 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
9261 9257 void (*callback)(caddr_t), caddr_t arg)
9262 9258 {
9263 9259 mptsas_t *mpt = ADDR2MPT(ap);
9264 9260
9265 9261 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
9266 9262
9267 9263 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
9268 9264 &mpt->m_mutex, &mpt->m_reset_notify_listf));
9269 9265 }
9270 9266
9271 9267 static int
9272 9268 mptsas_get_name(struct scsi_device *sd, char *name, int len)
9273 9269 {
9274 9270 dev_info_t *lun_dip = NULL;
9275 9271
9276 9272 ASSERT(sd != NULL);
9277 9273 ASSERT(name != NULL);
9278 9274 lun_dip = sd->sd_dev;
9279 9275 ASSERT(lun_dip != NULL);
9280 9276
9281 9277 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
9282 9278 return (1);
9283 9279 } else {
9284 9280 return (0);
9285 9281 }
9286 9282 }
9287 9283
/*
 * tran_get_bus_addr(9E): the bus address is the same unit-address
 * string produced by mptsas_get_name().
 */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
9293 9289
9294 9290 void
9295 9291 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
9296 9292 {
9297 9293
9298 9294 NDBG25(("mptsas_set_throttle: throttle=%x", what));
9299 9295
9300 9296 /*
9301 9297 * if the bus is draining/quiesced, no changes to the throttles
9302 9298 * are allowed. Not allowing change of throttles during draining
9303 9299 * limits error recovery but will reduce draining time
9304 9300 *
9305 9301 * all throttles should have been set to HOLD_THROTTLE
9306 9302 */
9307 9303 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
9308 9304 return;
9309 9305 }
9310 9306
9311 9307 if (what == HOLD_THROTTLE) {
9312 9308 ptgt->m_t_throttle = HOLD_THROTTLE;
9313 9309 } else if (ptgt->m_reset_delay == 0) {
9314 9310 ptgt->m_t_throttle = what;
9315 9311 }
9316 9312 }
9317 9313
/*
 * Clean up from a device reset.
 * For the case of target reset, this function clears the waitq of all
 * commands for a particular target. For the case of abort task set, this
 * function clears the waitq of all commands for a particular target/lun.
 */
static void
mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd, *next_cmd;
	int		slot;
	uchar_t		reason;
	uint_t		stat;
	hrtime_t	timestamp;

	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));

	timestamp = gethrtime();

	/*
	 * Make sure the I/O Controller has flushed all cmds
	 * that are associated with this target for a target reset
	 * and target/lun for abort task set.
	 * Account for TM requests, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;
		/* default disposition unless the tasktype says otherwise */
		reason = CMD_RESET;
		stat = STAT_DEV_RESET;
		switch (tasktype) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			if (Tgt(cmd) == target) {
				if (cmd->cmd_active_expiration <= timestamp) {
					/*
					 * When timeout requested, propagate
					 * proper reason and statistics to
					 * target drivers.
					 */
					reason = CMD_TIMEOUT;
					stat |= STAT_TIMEOUT;
				}
				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
			/* abort task set reports CMD_ABORTED instead */
			reason = CMD_ABORTED;
			stat = STAT_ABORTED;
			/*FALLTHROUGH*/
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {

				NDBG25(("mptsas_flush_target discovered non-"
				    "NULL cmd in slot %d, tasktype 0x%x", slot,
				    tasktype));
				mptsas_dump_cmd(mpt, cmd);
				mptsas_remove_cmd(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd, reason,
				    stat);
				mptsas_doneq_add(mpt, cmd);
			}
			break;
		default:
			break;
		}
	}

	/*
	 * Flush the waitq and tx_waitq of this target's cmds
	 */
	cmd = mpt->m_waitq;

	reason = CMD_RESET;
	stat = STAT_DEV_RESET;

	switch (tasktype) {
	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		/*
		 * tx_waitq has its own mutex; it is dropped around the
		 * completion bookkeeping for each deleted command.
		 */
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if (Tgt(cmd) == target) {
				mptsas_tx_waitq_delete(mpt, cmd);
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		reason = CMD_ABORTED;
		stat = STAT_ABORTED;
		/*FALLTHROUGH*/
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		/* same walk as above, but matching on target AND lun */
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_waitq_delete(mpt, cmd);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
			}
			cmd = next_cmd;
		}
		mutex_enter(&mpt->m_tx_waitq_mutex);
		cmd = mpt->m_tx_waitq;
		while (cmd != NULL) {
			next_cmd = cmd->cmd_linkp;
			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
				mptsas_tx_waitq_delete(mpt, cmd);
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mptsas_set_pkt_reason(mpt, cmd,
				    reason, stat);
				mptsas_doneq_add(mpt, cmd);
				mutex_enter(&mpt->m_tx_waitq_mutex);
			}
			cmd = next_cmd;
		}
		mutex_exit(&mpt->m_tx_waitq_mutex);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
		    tasktype);
		break;
	}
}
9465 9461
/*
 * Clean up hba state, abort all outstanding command and commands in waitq
 * reset timeout of all targets.
 */
static void
mptsas_flush_hba(mptsas_t *mpt)
{
	mptsas_slots_t	*slots = mpt->m_active;
	mptsas_cmd_t	*cmd;
	int		slot;

	NDBG25(("mptsas_flush_hba"));

	/*
	 * The I/O Controller should have already sent back
	 * all commands via the scsi I/O reply frame.  Make
	 * sure all commands have been flushed.
	 * Account for TM request, which use the last SMID.
	 */
	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
		if ((cmd = slots->m_slot[slot]) == NULL)
			continue;

		if (cmd->cmd_flags & CFLAG_CMDIOC) {
			/*
			 * Need to make sure to tell everyone that might be
			 * waiting on this command that it's going to fail. If
			 * we get here, this command will never timeout because
			 * the active command table is going to be re-allocated,
			 * so there will be nothing to check against a time out.
			 * Instead, mark the command as failed due to reset.
			 */
			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
			    STAT_BUS_RESET);
			if ((cmd->cmd_flags &
			    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
				/* wake whichever thread sleeps on this cmd */
				cmd->cmd_flags |= CFLAG_FINISHED;
				cv_broadcast(&mpt->m_passthru_cv);
				cv_broadcast(&mpt->m_config_cv);
				cv_broadcast(&mpt->m_fw_diag_cv);
			}
			continue;
		}

		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
		    slot));
		mptsas_dump_cmd(mpt, cmd);

		mptsas_remove_cmd(mpt, cmd);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
	}

	/*
	 * Flush the waitq.
	 */
	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			/* internal commands: wake their waiters directly */
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
		} else {
			mptsas_doneq_add(mpt, cmd);
		}
	}

	/*
	 * Flush the tx_waitq
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
		/* drop tx_waitq lock around completion bookkeeping */
		mutex_exit(&mpt->m_tx_waitq_mutex);
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		mptsas_doneq_add(mpt, cmd);
		mutex_enter(&mpt->m_tx_waitq_mutex);
	}
	mutex_exit(&mpt->m_tx_waitq_mutex);

	/*
	 * Drain the taskqs prior to reallocating resources. The thread
	 * passing through here could be launched from either (dr)
	 * or (event) taskqs so only wait on the 'other' queue since
	 * waiting on 'this' queue is a deadlock condition.
	 */
	mutex_exit(&mpt->m_mutex);
	if (!taskq_member((taskq_t *)mpt->m_event_taskq, curthread))
		ddi_taskq_wait(mpt->m_event_taskq);
	if (!taskq_member((taskq_t *)mpt->m_dr_taskq, curthread))
		ddi_taskq_wait(mpt->m_dr_taskq);

	mutex_enter(&mpt->m_mutex);
}
9562 9558
9563 9559 /*
9564 9560 * set pkt_reason and OR in pkt_statistics flag
9565 9561 */
9566 9562 static void
9567 9563 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9568 9564 uint_t stat)
9569 9565 {
9570 9566 #ifndef __lock_lint
9571 9567 _NOTE(ARGUNUSED(mpt))
9572 9568 #endif
9573 9569
9574 9570 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9575 9571 (void *)cmd, reason, stat));
9576 9572
9577 9573 if (cmd) {
9578 9574 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9579 9575 cmd->cmd_pkt->pkt_reason = reason;
9580 9576 }
9581 9577 cmd->cmd_pkt->pkt_statistics |= stat;
9582 9578 }
9583 9579 }
9584 9580
9585 9581 static void
9586 9582 mptsas_start_watch_reset_delay()
9587 9583 {
9588 9584 NDBG22(("mptsas_start_watch_reset_delay"));
9589 9585
9590 9586 mutex_enter(&mptsas_global_mutex);
9591 9587 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
9592 9588 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
9593 9589 drv_usectohz((clock_t)
9594 9590 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
9595 9591 ASSERT(mptsas_reset_watch != NULL);
9596 9592 }
9597 9593 mutex_exit(&mptsas_global_mutex);
9598 9594 }
9599 9595
9600 9596 static void
9601 9597 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
9602 9598 {
9603 9599 mptsas_target_t *ptgt = NULL;
9604 9600
9605 9601 ASSERT(MUTEX_HELD(&mpt->m_mutex));
9606 9602
9607 9603 NDBG22(("mptsas_setup_bus_reset_delay"));
9608 9604 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9609 9605 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9610 9606 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9611 9607 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
9612 9608 }
9613 9609
9614 9610 mptsas_start_watch_reset_delay();
9615 9611 }
9616 9612
9617 9613 /*
9618 9614 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9619 9615 * mpt instance for active reset delays
9620 9616 */
9621 9617 static void
9622 9618 mptsas_watch_reset_delay(void *arg)
9623 9619 {
9624 9620 #ifndef __lock_lint
9625 9621 _NOTE(ARGUNUSED(arg))
9626 9622 #endif
9627 9623
9628 9624 mptsas_t *mpt;
9629 9625 int not_done = 0;
9630 9626
9631 9627 NDBG22(("mptsas_watch_reset_delay"));
9632 9628
9633 9629 mutex_enter(&mptsas_global_mutex);
9634 9630 mptsas_reset_watch = 0;
9635 9631 mutex_exit(&mptsas_global_mutex);
9636 9632 rw_enter(&mptsas_global_rwlock, RW_READER);
9637 9633 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9638 9634 if (mpt->m_tran == 0) {
9639 9635 continue;
9640 9636 }
9641 9637 mutex_enter(&mpt->m_mutex);
9642 9638 not_done += mptsas_watch_reset_delay_subr(mpt);
9643 9639 mutex_exit(&mpt->m_mutex);
9644 9640 }
9645 9641 rw_exit(&mptsas_global_rwlock);
9646 9642
9647 9643 if (not_done) {
9648 9644 mptsas_start_watch_reset_delay();
9649 9645 }
9650 9646 }
9651 9647
/*
 * Tick down every target's reset delay on one instance; when a delay
 * expires, restore the target's throttle and restart I/O.  Returns
 * nonzero (-1) while at least one target is still counting down so the
 * caller keeps the watchdog armed.  Caller holds m_mutex.
 */
static int
mptsas_watch_reset_delay_subr(mptsas_t *mpt)
{
	int		done = 0;
	int		restart = 0;
	mptsas_target_t	*ptgt = NULL;

	NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		if (ptgt->m_reset_delay != 0) {
			ptgt->m_reset_delay -=
			    MPTSAS_WATCH_RESET_DELAY_TICK;
			if (ptgt->m_reset_delay <= 0) {
				/* delay expired: let I/O flow again */
				ptgt->m_reset_delay = 0;
				mptsas_set_throttle(mpt, ptgt,
				    MAX_THROTTLE);
				restart++;
			} else {
				done = -1;
			}
		}
	}

	/* at least one target became ready again; kick the HBA */
	if (restart > 0) {
		mptsas_restart_hba(mpt);
	}
	return (done);
}
9684 9680
#ifdef MPTSAS_TEST
/*
 * Debug hook: force a SCSI reset of the target selected through the
 * global mptsas_rtest tunable.  On a successful reset the tunable is
 * disarmed (set back to -1).  The unused local 'ptgt' was removed.
 */
static void
mptsas_test_reset(mptsas_t *mpt, int target)
{
	if (mptsas_rtest == target) {
		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
			mptsas_rtest = -1;
		}
		if (mptsas_rtest == -1) {
			NDBG22(("mptsas_test_reset success"));
		}
	}
}
#endif
9701 9697
/*
 * abort handling:
 *
 * Notes:
 *	- if pkt is not NULL, abort just that command
 *	- if pkt is NULL, abort all outstanding commands for target
 */
static int
mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	int			target, lun;

	/* resolve DevHandle and LUN from the per-target private data */
	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	target = tgt_private->t_private->m_devhdl;
	lun = tgt_private->t_lun;

	NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));

	mutex_enter(&mpt->m_mutex);
	rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
9730 9726
/*
 * Abort one packet (pkt != NULL) or the whole task set for target/lun
 * (pkt == NULL).  Returns TRUE/FALSE for the transport layer.  Caller
 * holds m_mutex.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		if (sp->cmd_queued) {
			/* still on a wait queue: complete it locally */
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* push any locally-completed commands to their callbacks */
	mptsas_doneq_empty(mpt);
	return (rval);
}
9810 9806
9811 9807 /*
9812 9808 * capability handling:
9813 9809 * (*tran_getcap). Get the capability named, and return its value.
9814 9810 */
9815 9811 static int
9816 9812 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9817 9813 {
9818 9814 mptsas_t *mpt = ADDR2MPT(ap);
9819 9815 int ckey;
9820 9816 int rval = FALSE;
9821 9817
9822 9818 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9823 9819 ap->a_target, cap, tgtonly));
9824 9820
9825 9821 mutex_enter(&mpt->m_mutex);
9826 9822
9827 9823 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9828 9824 mutex_exit(&mpt->m_mutex);
9829 9825 return (UNDEFINED);
9830 9826 }
9831 9827
9832 9828 switch (ckey) {
9833 9829 case SCSI_CAP_DMA_MAX:
9834 9830 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9835 9831 break;
9836 9832 case SCSI_CAP_ARQ:
9837 9833 rval = TRUE;
9838 9834 break;
9839 9835 case SCSI_CAP_MSG_OUT:
9840 9836 case SCSI_CAP_PARITY:
9841 9837 case SCSI_CAP_UNTAGGED_QING:
9842 9838 rval = TRUE;
9843 9839 break;
9844 9840 case SCSI_CAP_TAGGED_QING:
9845 9841 rval = TRUE;
9846 9842 break;
9847 9843 case SCSI_CAP_RESET_NOTIFICATION:
9848 9844 rval = TRUE;
9849 9845 break;
9850 9846 case SCSI_CAP_LINKED_CMDS:
9851 9847 rval = FALSE;
9852 9848 break;
9853 9849 case SCSI_CAP_QFULL_RETRIES:
9854 9850 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9855 9851 tran_tgt_private))->t_private->m_qfull_retries;
9856 9852 break;
9857 9853 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9858 9854 rval = drv_hztousec(((mptsas_tgt_private_t *)
9859 9855 (ap->a_hba_tran->tran_tgt_private))->
9860 9856 t_private->m_qfull_retry_interval) / 1000;
9861 9857 break;
9862 9858 case SCSI_CAP_CDB_LEN:
9863 9859 rval = CDB_GROUP4;
9864 9860 break;
9865 9861 case SCSI_CAP_INTERCONNECT_TYPE:
9866 9862 rval = INTERCONNECT_SAS;
9867 9863 break;
9868 9864 case SCSI_CAP_TRAN_LAYER_RETRIES:
9869 9865 if (mpt->m_ioc_capabilities &
9870 9866 MPI2_IOCFACTS_CAPABILITY_TLR)
9871 9867 rval = TRUE;
9872 9868 else
9873 9869 rval = FALSE;
9874 9870 break;
9875 9871 default:
9876 9872 rval = UNDEFINED;
9877 9873 break;
9878 9874 }
9879 9875
9880 9876 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9881 9877
9882 9878 mutex_exit(&mpt->m_mutex);
9883 9879 return (rval);
9884 9880 }
9885 9881
/*
 * (*tran_setcap). Set the capability named to the value given.
 */
static int
mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
{
	mptsas_t	*mpt = ADDR2MPT(ap);
	int		ckey;
	int		rval = FALSE;

	NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
	    ap->a_target, cap, value, tgtonly));

	/* only per-target capability changes are supported */
	if (!tgtonly) {
		return (rval);
	}

	mutex_enter(&mpt->m_mutex);

	if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
		mutex_exit(&mpt->m_mutex);
		return (UNDEFINED);
	}

	switch (ckey) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_INITIATOR_ID:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_RESET_NOTIFICATION:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_ARQ:
		/*
		 * We cannot turn off arq so return false if asked to
		 */
		if (value) {
			rval = TRUE;
		} else {
			rval = FALSE;
		}
		break;
	case SCSI_CAP_TAGGED_QING:
		/* re-enable queueing by restoring the full throttle */
		mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
		    (ap->a_hba_tran->tran_tgt_private))->t_private,
		    MAX_THROTTLE);
		rval = TRUE;
		break;
	case SCSI_CAP_QFULL_RETRIES:
		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
		    t_private->m_qfull_retries = (uchar_t)value;
		rval = TRUE;
		break;
	case SCSI_CAP_QFULL_RETRY_INTERVAL:
		/* value is in milliseconds; stored as clock ticks */
		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
		    t_private->m_qfull_retry_interval =
		    drv_usectohz(value * 1000);
		rval = TRUE;
		break;
	default:
		rval = UNDEFINED;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
9957 9953
9958 9954 /*
9959 9955 * Utility routine for mptsas_ifsetcap/ifgetcap
9960 9956 */
9961 9957 /*ARGSUSED*/
9962 9958 static int
9963 9959 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9964 9960 {
9965 9961 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9966 9962
9967 9963 if (!cap)
9968 9964 return (FALSE);
9969 9965
9970 9966 *cidxp = scsi_hba_lookup_capstr(cap);
9971 9967 return (TRUE);
9972 9968 }
9973 9969
/*
 * Allocate (or reallocate) the active-slot table sized for the current
 * m_max_requests.  Returns 0 on success, -1 on allocation failure.
 * 'flag' is the kmem sleep flag.  Must only run with no commands active.
 */
static int
mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
{
	mptsas_slots_t	*old_active = mpt->m_active;
	mptsas_slots_t	*new_active;
	size_t		size;

	/*
	 * if there are active commands, then we cannot
	 * change size of active slots array.
	 */
	ASSERT(mpt->m_ncmds == 0);

	size = MPTSAS_SLOTS_SIZE(mpt);
	new_active = kmem_zalloc(size, flag);
	if (new_active == NULL) {
		NDBG1(("new active alloc failed"));
		return (-1);
	}
	/*
	 * Since SMID 0 is reserved and the TM slot is reserved, the
	 * number of slots that can be used at any one time is
	 * m_max_requests - 2.
	 */
	new_active->m_n_normal = (mpt->m_max_requests - 2);
	new_active->m_size = size;
	new_active->m_rotor = 1;
	/* release the previous table before installing the new one */
	if (old_active)
		mptsas_free_active_slots(mpt);
	mpt->m_active = new_active;

	return (0);
}
10007 10003
10008 10004 static void
10009 10005 mptsas_free_active_slots(mptsas_t *mpt)
10010 10006 {
10011 10007 mptsas_slots_t *active = mpt->m_active;
10012 10008 size_t size;
10013 10009
10014 10010 if (active == NULL)
10015 10011 return;
10016 10012 size = active->m_size;
10017 10013 kmem_free(active, size);
10018 10014 mpt->m_active = NULL;
10019 10015 }
10020 10016
/*
 * Error logging, printing, and debug print routines.
 */
/* driver name passed to scsi_log(9F) to tag every message */
static char *mptsas_label = "mpt_sas";
10025 10021
/*
 * Format a message into the shared mptsas_log_buf and emit it through
 * scsi_log(9F) at the given severity.  'mpt' may be NULL (no devinfo).
 */
/*PRINTFLIKE3*/
void
mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
{
	dev_info_t	*dev;
	va_list	ap;

	if (mpt) {
		dev = mpt->m_dip;
	} else {
		dev = 0;
	}

	/* serialize use of the shared format buffer */
	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

	if (level == CE_CONT) {
		/* continuation lines get an explicit newline */
		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
	} else {
		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
	}

	mutex_exit(&mptsas_log_mutex);
}
10053 10049
#ifdef MPTSAS_DEBUG
/*
 * Use a circular buffer to log messages to private memory.
 * Increment idx atomically to minimize risk to miss lines.
 * It's fast and does not hold up the proceedings too much.
 */
/*
 * The index is wrapped with a mask rather than a modulo, so
 * MPTSAS_DBGLOG_LINECNT must be a power of two.
 */
static const size_t mptsas_dbglog_linecnt = MPTSAS_DBGLOG_LINECNT;
static const size_t mptsas_dbglog_linelen = MPTSAS_DBGLOG_LINELEN;
static char mptsas_dbglog_bufs[MPTSAS_DBGLOG_LINECNT][MPTSAS_DBGLOG_LINELEN];
static uint32_t mptsas_dbglog_idx = 0;
/*
 * Record one formatted line in the in-memory circular debug log.
 * Lock-free: each caller claims a distinct slot atomically.
 */
/*PRINTFLIKE1*/
void
mptsas_debug_log(char *fmt, ...)
{
	va_list	ap;
	uint32_t idx;

	/*
	 * Claim a slot with an atomic increment; the mask wraps the
	 * index within the (power-of-two) line count.
	 */
	idx = atomic_inc_32_nv(&mptsas_dbglog_idx) &
	    (mptsas_dbglog_linecnt - 1);

	/* vsnprintf() bounds the write to one log line. */
	va_start(ap, fmt);
	(void) vsnprintf(mptsas_dbglog_bufs[idx],
	    mptsas_dbglog_linelen, fmt, ap);
	va_end(ap);
}
10080 10076
/*
 * Debug printf: emits to the PROM console when PROM_PRINTF is defined,
 * otherwise through scsi_log() as a continuation-level message.
 */
/*PRINTFLIKE1*/
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t	*dev = 0;
	va_list	ap;

	/* Shares mptsas_log_buf with mptsas_log(); serialize access. */
	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
10101 10097 #endif
10102 10098
10103 10099 /*
10104 10100 * timeout handling
10105 10101 */
10106 10102 static void
10107 10103 mptsas_watch(void *arg)
10108 10104 {
10109 10105 #ifndef __lock_lint
10110 10106 _NOTE(ARGUNUSED(arg))
10111 10107 #endif
10112 10108
10113 10109 mptsas_t *mpt;
10114 10110 uint32_t doorbell;
10115 10111
10116 10112 NDBG30(("mptsas_watch"));
10117 10113
10118 10114 rw_enter(&mptsas_global_rwlock, RW_READER);
10119 10115 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
10120 10116
10121 10117 mutex_enter(&mpt->m_mutex);
10122 10118
10123 10119 /* Skip device if not powered on */
10124 10120 if (mpt->m_options & MPTSAS_OPT_PM) {
10125 10121 if (mpt->m_power_level == PM_LEVEL_D0) {
10126 10122 (void) pm_busy_component(mpt->m_dip, 0);
10127 10123 mpt->m_busy = 1;
10128 10124 } else {
10129 10125 mutex_exit(&mpt->m_mutex);
10130 10126 continue;
10131 10127 }
10132 10128 }
10133 10129
10134 10130 /*
10135 10131 * Check if controller is in a FAULT state. If so, reset it.
10136 10132 */
10137 10133 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
10138 10134 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
10139 10135 doorbell &= MPI2_DOORBELL_DATA_MASK;
10140 10136 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
10141 10137 "code: %04x", doorbell);
10142 10138 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
10143 10139 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10144 10140 mptsas_log(mpt, CE_WARN, "Reset failed"
10145 10141 "after fault was detected");
10146 10142 }
10147 10143 }
10148 10144
10149 10145 /*
10150 10146 * For now, always call mptsas_watchsubr.
10151 10147 */
10152 10148 mptsas_watchsubr(mpt);
10153 10149
10154 10150 if (mpt->m_options & MPTSAS_OPT_PM) {
10155 10151 mpt->m_busy = 0;
10156 10152 (void) pm_idle_component(mpt->m_dip, 0);
10157 10153 }
10158 10154
10159 10155 mutex_exit(&mpt->m_mutex);
10160 10156 }
10161 10157 rw_exit(&mptsas_global_rwlock);
10162 10158
10163 10159 mutex_enter(&mptsas_global_mutex);
10164 10160 if (mptsas_timeouts_enabled)
10165 10161 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
10166 10162 mutex_exit(&mptsas_global_mutex);
10167 10163 }
10168 10164
/*
 * Per-target portion of the watchdog: restore full throttle once a
 * qfull drain has caught up, and handle expired command timeouts.
 * 'timestamp' is the gethrtime() value captured once per watch cycle.
 */
static void
mptsas_watchsubr_tgt(mptsas_t *mpt, mptsas_target_t *ptgt, hrtime_t timestamp)
{
	mptsas_cmd_t	*cmd;

	/*
	 * If we were draining due to a qfull condition,
	 * go back to full throttle.
	 */
	if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
		mptsas_restart_hba(mpt);
	}

	/* The tail of the active queue is the earliest-expiring command. */
	cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
	if (cmd == NULL)
		return;

	if (cmd->cmd_active_expiration <= timestamp) {
		/*
		 * Earliest command timeout expired. Drain throttle.
		 */
		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

		/*
		 * Check for remaining commands.
		 */
		cmd = TAILQ_FIRST(&ptgt->m_active_cmdq);
		if (cmd->cmd_active_expiration > timestamp) {
			/*
			 * Wait for remaining commands to complete or
			 * time out.
			 */
			NDBG23(("command timed out, pending drain"));
			return;
		}

		/*
		 * All command timeouts expired.
		 */
		mptsas_log(mpt, CE_NOTE, "Timeout of %d seconds "
		    "expired with %d commands on target %d lun %d.",
		    cmd->cmd_pkt->pkt_time, ptgt->m_t_ncmds,
		    ptgt->m_devhdl, Lun(cmd));

		/* Reset the target to abort everything outstanding. */
		mptsas_cmd_timeout(mpt, ptgt);
	} else if (cmd->cmd_active_expiration <=
	    timestamp + (hrtime_t)mptsas_scsi_watchdog_tick * NANOSEC) {
		/* Expiry falls within the next watch tick; start draining. */
		NDBG23(("pending timeout"));
		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
	}
}
10223 10219
/*
 * Per-instance body of the watchdog: scan the active slots for expired
 * commands, then run per-target timeout processing over both the main
 * and the temporary target hashes.  Called with m_mutex held.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;
	hrtime_t	timestamp = gethrtime();

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if (cmd->cmd_active_expiration <= timestamp) {
				if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				} else if (cmd->cmd_flags &
				    (CFLAG_PASSTHRU | CFLAG_CONFIG |
				    CFLAG_FW_DIAG)) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					/*
					 * Wake all three waiters; each checks
					 * its own CFLAG to see if it applies.
					 */
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_watchsubr_tgt(mpt, ptgt, timestamp);
	}

	for (ptgt = refhash_first(mpt->m_tmp_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_tmp_targets, ptgt)) {
		mptsas_watchsubr_tgt(mpt, ptgt, timestamp);
	}
}
10283 10279
10284 10280 /*
10285 10281 * timeout recovery
10286 10282 */
/*
 * Recover from a disconnected command timeout: log the offending
 * target (by SAS WWN, or PHY number when directly attached) and reset
 * it, which aborts all of the target's outstanding commands.
 */
static void
mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt)
{
	uint16_t	devhdl;
	uint64_t	sas_wwn;
	uint8_t		phy;
	char		wwn_str[MPTSAS_WWN_STRLEN];

	devhdl = ptgt->m_devhdl;
	sas_wwn = ptgt->m_addr.mta_wwn;
	phy = ptgt->m_phynum;
	/* No WWN means direct attach: identify by PHY ("p<phy>"). */
	if (sas_wwn == 0) {
		(void) sprintf(wwn_str, "p%x", phy);
	} else {
		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
	}

	NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
	mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
	    "target %d %s, enclosure %u", devhdl, wwn_str,
	    ptgt->m_enclosure);

	/*
	 * Abort all outstanding commands on the device.
	 */
	NDBG29(("mptsas_cmd_timeout: device reset"));
	if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
		mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
		    "recovery failed!", devhdl);
	}
}
10318 10314
10319 10315 /*
10320 10316 * Device / Hotplug control
10321 10317 */
10322 10318 static int
10323 10319 mptsas_scsi_quiesce(dev_info_t *dip)
10324 10320 {
10325 10321 mptsas_t *mpt;
10326 10322 scsi_hba_tran_t *tran;
10327 10323
10328 10324 tran = ddi_get_driver_private(dip);
10329 10325 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10330 10326 return (-1);
10331 10327
10332 10328 return (mptsas_quiesce_bus(mpt));
10333 10329 }
10334 10330
10335 10331 static int
10336 10332 mptsas_scsi_unquiesce(dev_info_t *dip)
10337 10333 {
10338 10334 mptsas_t *mpt;
10339 10335 scsi_hba_tran_t *tran;
10340 10336
10341 10337 tran = ddi_get_driver_private(dip);
10342 10338 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10343 10339 return (-1);
10344 10340
10345 10341 return (mptsas_unquiesce_bus(mpt));
10346 10342 }
10347 10343
/*
 * Quiesce the bus: hold every target's throttle and wait for all
 * outstanding commands to drain.  Returns 0 once quiesced, -1 if the
 * wait was interrupted by a signal.
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		/*
		 * Arm the periodic drain check; mptsas_ncmds_checkdrain()
		 * signals m_cv once m_ncmds reaches zero.
		 */
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			/* Undo the hold and let I/O flow again. */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}
			mptsas_restart_hba(mpt);
			/*
			 * Cancel the pending drain check.  untimeout() can
			 * block on a running handler that needs m_mutex, so
			 * the mutex must be dropped before calling it.
			 */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
10400 10396
10401 10397 static int
10402 10398 mptsas_unquiesce_bus(mptsas_t *mpt)
10403 10399 {
10404 10400 mptsas_target_t *ptgt = NULL;
10405 10401
10406 10402 NDBG28(("mptsas_unquiesce_bus"));
10407 10403 mutex_enter(&mpt->m_mutex);
10408 10404 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10409 10405 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10410 10406 ptgt = refhash_next(mpt->m_targets, ptgt)) {
10411 10407 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10412 10408 }
10413 10409 mptsas_restart_hba(mpt);
10414 10410 mutex_exit(&mpt->m_mutex);
10415 10411 return (0);
10416 10412 }
10417 10413
/*
 * timeout(9F) handler armed by mptsas_quiesce_bus(): signal the waiter
 * once the command queue has drained; otherwise re-hold the throttles
 * (they may have been reset by a bus reset) and re-arm itself.
 */
static void
mptsas_ncmds_checkdrain(void *arg)
{
	mptsas_t	*mpt = arg;
	mptsas_target_t	*ptgt = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Only act while a quiesce is actually in progress. */
	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
		mpt->m_quiesce_timeid = 0;
		if (mpt->m_ncmds == 0) {
			/* Command queue has been drained */
			cv_signal(&mpt->m_cv);
		} else {
			/*
			 * The throttle may have been reset because
			 * of a SCSI bus reset
			 */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
			}

			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
			    drv_usectohz(1000000)));
		}
	}
	mutex_exit(&mpt->m_mutex);
}
10447 10443
10448 10444 /*ARGSUSED*/
10449 10445 static void
10450 10446 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10451 10447 {
10452 10448 int i;
10453 10449 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10454 10450 char buf[128];
10455 10451
10456 10452 buf[0] = '\0';
10457 10453 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10458 10454 Tgt(cmd), Lun(cmd)));
10459 10455 (void) sprintf(&buf[0], "\tcdb=[");
10460 10456 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10461 10457 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10462 10458 }
10463 10459 (void) sprintf(&buf[strlen(buf)], " ]");
10464 10460 NDBG25(("?%s\n", buf));
10465 10461 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10466 10462 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10467 10463 cmd->cmd_pkt->pkt_state));
10468 10464 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10469 10465 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10470 10466 }
10471 10467
/*
 * Build the MPI2 simple 64-bit SGEs for a passthrough request at *sgep:
 * an optional host-to-IOC element for the data-out buffer first, then
 * the final end-of-list element for the data buffer, whose transfer
 * direction follows pt->direction.
 */
static void
mptsas_passthru_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
    pMpi2SGESimple64_t sgep)
{
	uint32_t	sge_flags;
	uint32_t	data_size, dataout_size;
	ddi_dma_cookie_t data_cookie;
	ddi_dma_cookie_t dataout_cookie;

	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	/* Data-out buffer first, if the caller supplied one. */
	if (dataout_size) {
		sge_flags = dataout_size |
		    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_HOST_TO_IOC |
		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
		    MPI2_SGE_FLAGS_SHIFT);
		ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
		ddi_put32(acc_hdl, &sgep->Address.Low,
		    (uint32_t)(dataout_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &sgep->Address.High,
		    (uint32_t)(dataout_cookie.dmac_laddress
		    >> 32));
		sgep++;
	}
	/* Final element: marks last/end-of-buffer/end-of-list. */
	sge_flags = data_size;
	sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER |
	    MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
	    MPI2_SGE_FLAGS_SHIFT);
	if (pt->direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
		    MPI2_SGE_FLAGS_SHIFT);
	} else {
		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
		    MPI2_SGE_FLAGS_SHIFT);
	}
	ddi_put32(acc_hdl, &sgep->FlagsLength,
	    sge_flags);
	ddi_put32(acc_hdl, &sgep->Address.Low,
	    (uint32_t)(data_cookie.dmac_laddress &
	    0xffffffffull));
	ddi_put32(acc_hdl, &sgep->Address.High,
	    (uint32_t)(data_cookie.dmac_laddress >> 32));
}
10524 10520
/*
 * Build the MPI2.5 IEEE simple 64-bit SGEs for a passthrough request at
 * *ieeesgep: an optional element for the data-out buffer first, then
 * the final element for the data buffer with the end-of-list flag set.
 */
static void
mptsas_passthru_ieee_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
    pMpi2IeeeSgeSimple64_t ieeesgep)
{
	uint8_t		sge_flags;
	uint32_t	data_size, dataout_size;
	ddi_dma_cookie_t data_cookie;
	ddi_dma_cookie_t dataout_cookie;

	data_size = pt->data_size;
	dataout_size = pt->dataout_size;
	data_cookie = pt->data_cookie;
	dataout_cookie = pt->dataout_cookie;

	/* IEEE SGEs carry direction in the request, not in these flags. */
	sge_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
	/* Data-out buffer first, if the caller supplied one. */
	if (dataout_size) {
		ddi_put32(acc_hdl, &ieeesgep->Length, dataout_size);
		ddi_put32(acc_hdl, &ieeesgep->Address.Low,
		    (uint32_t)(dataout_cookie.dmac_laddress &
		    0xffffffffull));
		ddi_put32(acc_hdl, &ieeesgep->Address.High,
		    (uint32_t)(dataout_cookie.dmac_laddress >> 32));
		ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
		ieeesgep++;
	}
	/* Final element terminates the chain. */
	sge_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
	ddi_put32(acc_hdl, &ieeesgep->Length, data_size);
	ddi_put32(acc_hdl, &ieeesgep->Address.Low,
	    (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
	ddi_put32(acc_hdl, &ieeesgep->Address.High,
	    (uint32_t)(data_cookie.dmac_laddress >> 32));
	ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
}
10559 10555
10560 10556 static void
10561 10557 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
10562 10558 {
10563 10559 caddr_t memp;
10564 10560 pMPI2RequestHeader_t request_hdrp;
10565 10561 struct scsi_pkt *pkt = cmd->cmd_pkt;
10566 10562 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
10567 10563 uint32_t request_size;
10568 10564 uint32_t i;
10569 10565 uint64_t request_desc = 0;
10570 10566 uint8_t desc_type;
10571 10567 uint16_t SMID;
10572 10568 uint8_t *request, function;
10573 10569 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
10574 10570 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
10575 10571
10576 10572 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10577 10573
10578 10574 request = pt->request;
10579 10575 request_size = pt->request_size;
10580 10576
10581 10577 SMID = cmd->cmd_slot;
10582 10578
10583 10579 /*
10584 10580 * Store the passthrough message in memory location
10585 10581 * corresponding to our slot number
10586 10582 */
10587 10583 memp = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
10588 10584 request_hdrp = (pMPI2RequestHeader_t)memp;
10589 10585 bzero(memp, mpt->m_req_frame_size);
10590 10586
10591 10587 for (i = 0; i < request_size; i++) {
10592 10588 bcopy(request + i, memp + i, 1);
10593 10589 }
10594 10590
10595 10591 NDBG15(("mptsas_start_passthru: Func 0x%x, MsgFlags 0x%x, "
10596 10592 "size=%d, in %d, out %d, SMID %d", request_hdrp->Function,
10597 10593 request_hdrp->MsgFlags, request_size,
10598 10594 pt->data_size, pt->dataout_size, SMID));
10599 10595
10600 10596 /*
10601 10597 * Add an SGE, even if the length is zero.
10602 10598 */
10603 10599 if (mpt->m_MPI25 && pt->simple == 0) {
10604 10600 mptsas_passthru_ieee_sge(acc_hdl, pt,
10605 10601 (pMpi2IeeeSgeSimple64_t)
10606 10602 ((uint8_t *)request_hdrp + pt->sgl_offset));
10607 10603 } else {
10608 10604 mptsas_passthru_sge(acc_hdl, pt,
10609 10605 (pMpi2SGESimple64_t)
10610 10606 ((uint8_t *)request_hdrp + pt->sgl_offset));
10611 10607 }
10612 10608
10613 10609 function = request_hdrp->Function;
10614 10610 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10615 10611 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10616 10612 pMpi2SCSIIORequest_t scsi_io_req;
10617 10613 caddr_t arsbuf;
10618 10614 uint8_t ars_size;
10619 10615 uint32_t ars_dmaaddrlow;
10620 10616
10621 10617 NDBG15(("mptsas_start_passthru: Is SCSI IO Req"));
10622 10618 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
10623 10619
10624 10620 if (cmd->cmd_extrqslen != 0) {
10625 10621 /*
10626 10622 * Mapping of the buffer was done in
10627 10623 * mptsas_do_passthru().
10628 10624 * Calculate the DMA address with the same offset.
10629 10625 */
10630 10626 arsbuf = cmd->cmd_arq_buf;
10631 10627 ars_size = cmd->cmd_extrqslen;
10632 10628 ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10633 10629 ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
10634 10630 0xffffffffu;
10635 10631 } else {
10636 10632 arsbuf = mpt->m_req_sense +
10637 10633 (mpt->m_req_sense_size * (SMID-1));
10638 10634 cmd->cmd_arq_buf = arsbuf;
10639 10635 ars_size = mpt->m_req_sense_size;
10640 10636 ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10641 10637 (mpt->m_req_sense_size * (SMID-1))) &
10642 10638 0xffffffffu;
10643 10639 }
10644 10640 bzero(arsbuf, ars_size);
10645 10641
10646 10642 ddi_put8(acc_hdl, &scsi_io_req->SenseBufferLength, ars_size);
10647 10643 ddi_put32(acc_hdl, &scsi_io_req->SenseBufferLowAddress,
10648 10644 ars_dmaaddrlow);
10649 10645
10650 10646 /*
10651 10647 * Put SGE for data and data_out buffer at the end of
10652 10648 * scsi_io_request message header.(64 bytes in total)
10653 10649 * Set SGLOffset0 value
10654 10650 */
10655 10651 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
10656 10652 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
10657 10653
10658 10654 /*
10659 10655 * Setup descriptor info. RAID passthrough must use the
10660 10656 * default request descriptor which is already set, so if this
10661 10657 * is a SCSI IO request, change the descriptor to SCSI IO.
10662 10658 */
10663 10659 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
10664 10660 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
10665 10661 request_desc = ((uint64_t)ddi_get16(acc_hdl,
10666 10662 &scsi_io_req->DevHandle) << 48);
10667 10663 }
10668 10664 (void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
10669 10665 DDI_DMA_SYNC_FORDEV);
10670 10666 }
10671 10667
10672 10668 /*
10673 10669 * We must wait till the message has been completed before
10674 10670 * beginning the next message so we wait for this one to
10675 10671 * finish.
10676 10672 */
10677 10673 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
10678 10674 request_desc |= (SMID << 16) + desc_type;
10679 10675 cmd->cmd_rfm = NULL;
10680 10676 MPTSAS_START_CMD(mpt, request_desc);
10681 10677 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
10682 10678 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
10683 10679 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10684 10680 }
10685 10681 }
10686 10682
/*
 * Per-function "prepare" handlers for user passthrough requests.  Each
 * validates the request/reply sizes for its MPI function and computes
 * pt->sgl_offset before the request is issued.
 */
typedef void (mptsas_pre_f)(mptsas_t *, mptsas_pt_request_t *);
static mptsas_pre_f	mpi_pre_ioc_facts;
static mptsas_pre_f	mpi_pre_port_facts;
static mptsas_pre_f	mpi_pre_fw_download;
static mptsas_pre_f	mpi_pre_fw_25_download;
static mptsas_pre_f	mpi_pre_fw_upload;
static mptsas_pre_f	mpi_pre_fw_25_upload;
static mptsas_pre_f	mpi_pre_sata_passthrough;
static mptsas_pre_f	mpi_pre_smp_passthrough;
static mptsas_pre_f	mpi_pre_config;
static mptsas_pre_f	mpi_pre_sas_io_unit_control;
static mptsas_pre_f	mpi_pre_scsi_io_req;
10699 10695
10700 10696 /*
10701 10697 * Prepare the pt for a SAS2 FW_DOWNLOAD request.
10702 10698 */
static void
mpi_pre_fw_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
	pMpi2FWDownloadTCSGE_t tcsge;
	pMpi2FWDownloadRequest req;

	/*
	 * If SAS3, call separate function.
	 */
	if (mpt->m_MPI25) {
		mpi_pre_fw_25_download(mpt, pt);
		return;
	}

	/*
	 * User requests should come in with the Transaction
	 * context element where the SGL will go. Putting the
	 * SGL after that seems to work, but don't really know
	 * why. Other drivers tend to create an extra SGL and
	 * refer to the TCE through that.
	 */
	req = (pMpi2FWDownloadRequest)pt->request;
	tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
	/* Sanity-check the TCE; a malformed one is logged but tolerated. */
	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
		mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
	}

	/* The SGL goes immediately after the TCE. */
	pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
	    sizeof (*tcsge);
	if (pt->request_size != pt->sgl_offset) {
		NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
		    "0x%x, should be 0x%x, dataoutsz 0x%x",
		    (int)pt->request_size, (int)pt->sgl_offset,
		    (int)pt->dataout_size));
	}
	if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY)) {
		NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
		    "0x%x, should be 0x%x", pt->data_size,
		    (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
	}
}
10745 10741
10746 10742 /*
10747 10743 * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10748 10744 */
10749 10745 static void
10750 10746 mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10751 10747 {
10752 10748 pMpi2FWDownloadTCSGE_t tcsge;
10753 10749 pMpi2FWDownloadRequest req2;
10754 10750 pMpi25FWDownloadRequest req25;
10755 10751
10756 10752 /*
10757 10753 * User requests should come in with the Transaction
10758 10754 * context element where the SGL will go. The new firmware
10759 10755 * Doesn't use TCE and has space in the main request for
10760 10756 * this information. So move to the right place.
10761 10757 */
10762 10758 req2 = (pMpi2FWDownloadRequest)pt->request;
10763 10759 req25 = (pMpi25FWDownloadRequest)pt->request;
10764 10760 tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10765 10761 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10766 10762 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10767 10763 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10768 10764 }
10769 10765 req25->ImageOffset = tcsge->ImageOffset;
10770 10766 req25->ImageSize = tcsge->ImageSize;
10771 10767
10772 10768 pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10773 10769 if (pt->request_size != pt->sgl_offset) {
10774 10770 NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10775 10771 "0x%x, should be 0x%x, dataoutsz 0x%x",
10776 10772 pt->request_size, pt->sgl_offset,
10777 10773 pt->dataout_size));
10778 10774 }
10779 10775 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY)) {
10780 10776 NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10781 10777 "0x%x, should be 0x%x", pt->data_size,
10782 10778 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10783 10779 }
10784 10780 }
10785 10781
10786 10782 /*
10787 10783 * Prepare the pt for a SAS2 FW_UPLOAD request.
10788 10784 */
static void
mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
	pMpi2FWUploadTCSGE_t tcsge;
	pMpi2FWUploadRequest_t req;

	/*
	 * If SAS3, call separate function.
	 */
	if (mpt->m_MPI25) {
		mpi_pre_fw_25_upload(mpt, pt);
		return;
	}

	/*
	 * User requests should come in with the Transaction
	 * context element where the SGL will go. Putting the
	 * SGL after that seems to work, but don't really know
	 * why. Other drivers tend to create an extra SGL and
	 * refer to the TCE through that.
	 */
	req = (pMpi2FWUploadRequest_t)pt->request;
	tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
	/* Sanity-check the TCE; a malformed one is logged but tolerated. */
	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
		mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
	}

	/* The SGL goes immediately after the TCE. */
	pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
	    sizeof (*tcsge);
	if (pt->request_size != pt->sgl_offset) {
		NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
		    "0x%x, should be 0x%x, dataoutsz 0x%x",
		    pt->request_size, pt->sgl_offset,
		    pt->dataout_size));
	}
	if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY)) {
		NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
		    "0x%x, should be 0x%x", pt->data_size,
		    (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
	}
}
10831 10827
10832 10828 /*
10833 10829 * Prepare the pt a SAS3 FW_UPLOAD request.
10834 10830 */
static void
mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
	pMpi2FWUploadTCSGE_t tcsge;
	pMpi2FWUploadRequest_t req2;
	pMpi25FWUploadRequest_t req25;

	/*
	 * User requests should come in with the Transaction
	 * context element where the SGL will go. The new firmware
	 * Doesn't use TCE and has space in the main request for
	 * this information. So move to the right place.
	 */
	req2 = (pMpi2FWUploadRequest_t)pt->request;
	req25 = (pMpi25FWUploadRequest_t)pt->request;
	tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
	/* Sanity-check the TCE; a malformed one is logged but tolerated. */
	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
		mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
	}
	/* Image offset/size live in the MPI 2.5 request itself. */
	req25->ImageOffset = tcsge->ImageOffset;
	req25->ImageSize = tcsge->ImageSize;

	pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
	if (pt->request_size != pt->sgl_offset) {
		NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
		    "0x%x, should be 0x%x, dataoutsz 0x%x",
		    pt->request_size, pt->sgl_offset,
		    pt->dataout_size));
	}
	if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY)) {
		NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
		    "0x%x, should be 0x%x", pt->data_size,
		    (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
	}
}
10871 10867
10872 10868 /*
10873 10869 * Prepare the pt for an IOC_FACTS request.
10874 10870 */
10875 10871 static void
10876 10872 mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10877 10873 {
10878 10874 #ifndef __lock_lint
10879 10875 _NOTE(ARGUNUSED(mpt))
10880 10876 #endif
10881 10877 if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST)) {
10882 10878 NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
10883 10879 "0x%x, should be 0x%x, dataoutsz 0x%x",
10884 10880 pt->request_size,
10885 10881 (int)sizeof (MPI2_IOC_FACTS_REQUEST),
10886 10882 pt->dataout_size));
10887 10883 }
10888 10884 if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY)) {
10889 10885 NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
10890 10886 "0x%x, should be 0x%x", pt->data_size,
10891 10887 (int)sizeof (MPI2_IOC_FACTS_REPLY)));
10892 10888 }
10893 10889 pt->sgl_offset = (uint16_t)pt->request_size;
10894 10890 }
10895 10891
10896 10892 /*
10897 10893 * Prepare the pt for a PORT_FACTS request.
10898 10894 */
10899 10895 static void
10900 10896 mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10901 10897 {
10902 10898 #ifndef __lock_lint
10903 10899 _NOTE(ARGUNUSED(mpt))
10904 10900 #endif
10905 10901 if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST)) {
10906 10902 NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
10907 10903 "0x%x, should be 0x%x, dataoutsz 0x%x",
10908 10904 pt->request_size,
10909 10905 (int)sizeof (MPI2_PORT_FACTS_REQUEST),
10910 10906 pt->dataout_size));
10911 10907 }
10912 10908 if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY)) {
10913 10909 NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
10914 10910 "0x%x, should be 0x%x", pt->data_size,
10915 10911 (int)sizeof (MPI2_PORT_FACTS_REPLY)));
10916 10912 }
10917 10913 pt->sgl_offset = (uint16_t)pt->request_size;
10918 10914 }
10919 10915
10920 10916 /*
10921 10917 * Prepare pt for a SATA_PASSTHROUGH request.
10922 10918 */
10923 10919 static void
10924 10920 mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10925 10921 {
10926 10922 #ifndef __lock_lint
10927 10923 _NOTE(ARGUNUSED(mpt))
10928 10924 #endif
10929 10925 pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
10930 10926 if (pt->request_size != pt->sgl_offset) {
10931 10927 NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
10932 10928 "0x%x, should be 0x%x, dataoutsz 0x%x",
10933 10929 pt->request_size, pt->sgl_offset,
10934 10930 pt->dataout_size));
10935 10931 }
10936 10932 if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY)) {
10937 10933 NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
10938 10934 "0x%x, should be 0x%x", pt->data_size,
10939 10935 (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
10940 10936 }
10941 10937 }
10942 10938
10943 10939 static void
10944 10940 mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10945 10941 {
10946 10942 #ifndef __lock_lint
10947 10943 _NOTE(ARGUNUSED(mpt))
10948 10944 #endif
10949 10945 pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
10950 10946 if (pt->request_size != pt->sgl_offset) {
10951 10947 NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
10952 10948 "0x%x, should be 0x%x, dataoutsz 0x%x",
10953 10949 pt->request_size, pt->sgl_offset,
10954 10950 pt->dataout_size));
10955 10951 }
10956 10952 if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY)) {
10957 10953 NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
10958 10954 "0x%x, should be 0x%x", pt->data_size,
10959 10955 (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
10960 10956 }
10961 10957 }
10962 10958
10963 10959 /*
10964 10960 * Prepare pt for a CONFIG request.
10965 10961 */
10966 10962 static void
10967 10963 mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
10968 10964 {
10969 10965 #ifndef __lock_lint
10970 10966 _NOTE(ARGUNUSED(mpt))
10971 10967 #endif
10972 10968 pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
10973 10969 if (pt->request_size != pt->sgl_offset) {
10974 10970 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10975 10971 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10976 10972 pt->sgl_offset, pt->dataout_size));
10977 10973 }
10978 10974 if (pt->data_size != sizeof (MPI2_CONFIG_REPLY)) {
10979 10975 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10980 10976 "should be 0x%x", pt->data_size,
10981 10977 (int)sizeof (MPI2_CONFIG_REPLY)));
10982 10978 }
10983 10979 pt->simple = 1;
10984 10980 }
10985 10981
10986 10982 /*
10987 10983 * Prepare pt for a SCSI_IO_REQ request.
10988 10984 */
10989 10985 static void
10990 10986 mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
10991 10987 {
10992 10988 #ifndef __lock_lint
10993 10989 _NOTE(ARGUNUSED(mpt))
10994 10990 #endif
10995 10991 pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
10996 10992 if (pt->request_size != pt->sgl_offset) {
10997 10993 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10998 10994 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10999 10995 pt->sgl_offset,
11000 10996 pt->dataout_size));
11001 10997 }
11002 10998 if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY)) {
11003 10999 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
11004 11000 "should be 0x%x", pt->data_size,
11005 11001 (int)sizeof (MPI2_SCSI_IO_REPLY)));
11006 11002 }
11007 11003 }
11008 11004
/*
 * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
 *
 * SAS_IO_UNIT_CONTROL carries no SGL, so the SGL "offset" is simply the
 * full request size (nothing follows the message body).
 */
static void
mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(mpt))
#endif
	pt->sgl_offset = (uint16_t)pt->request_size;
}
11020 11016
/*
 * A set of functions to prepare an mptsas_cmd for the various
 * supported requests.
 *
 * mptsas_prep_sgl_offset() walks this table linearly, matching the MPI
 * Function code of an incoming pass-through request to its prep routine.
 * The table is terminated by the { 0xFF, NULL, NULL } sentinel entry.
 */
static struct mptsas_func {
	U8		Function;	/* MPI2_FUNCTION_* request code */
	char		*Name;		/* human-readable name for debug logs */
	mptsas_pre_f	*f_pre;		/* per-function prep routine */
} mptsas_func_list[] = {
	{ MPI2_FUNCTION_IOC_FACTS, "IOC_FACTS", mpi_pre_ioc_facts },
	{ MPI2_FUNCTION_PORT_FACTS, "PORT_FACTS", mpi_pre_port_facts },
	{ MPI2_FUNCTION_FW_DOWNLOAD, "FW_DOWNLOAD", mpi_pre_fw_download },
	{ MPI2_FUNCTION_FW_UPLOAD, "FW_UPLOAD", mpi_pre_fw_upload },
	{ MPI2_FUNCTION_SATA_PASSTHROUGH, "SATA_PASSTHROUGH",
	    mpi_pre_sata_passthrough },
	{ MPI2_FUNCTION_SMP_PASSTHROUGH, "SMP_PASSTHROUGH",
	    mpi_pre_smp_passthrough},
	{ MPI2_FUNCTION_SCSI_IO_REQUEST, "SCSI_IO_REQUEST",
	    mpi_pre_scsi_io_req},
	{ MPI2_FUNCTION_CONFIG, "CONFIG", mpi_pre_config},
	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, "SAS_IO_UNIT_CONTROL",
	    mpi_pre_sas_io_unit_control },
	{ 0xFF, NULL, NULL } /* list end */
};
11045 11041
11046 11042 static void
11047 11043 mptsas_prep_sgl_offset(mptsas_t *mpt, mptsas_pt_request_t *pt)
11048 11044 {
11049 11045 pMPI2RequestHeader_t hdr;
11050 11046 struct mptsas_func *f;
11051 11047
11052 11048 hdr = (pMPI2RequestHeader_t)pt->request;
11053 11049
11054 11050 for (f = mptsas_func_list; f->f_pre != NULL; f++) {
11055 11051 if (hdr->Function == f->Function) {
11056 11052 f->f_pre(mpt, pt);
11057 11053 NDBG15(("mptsas_prep_sgl_offset: Function %s,"
11058 11054 " sgl_offset 0x%x", f->Name,
11059 11055 pt->sgl_offset));
11060 11056 return;
11061 11057 }
11062 11058 }
11063 11059 NDBG15(("mptsas_prep_sgl_offset: Unknown Function 0x%02x,"
11064 11060 " returning req_size 0x%x for sgl_offset",
11065 11061 hdr->Function, pt->request_size));
11066 11062 pt->sgl_offset = (uint16_t)pt->request_size;
11067 11063 }
11068 11064
11069 11065
/*
 * Execute a single MPI pass-through request on behalf of an ioctl caller.
 *
 * Entered and exited with mpt->m_mutex held (asserted below); the mutex is
 * dropped around every ddi_copyin()/ddi_copyout() to/from user space and
 * re-acquired before touching driver state again.
 *
 * Flow: copy in the request frame; special-case SCSI task management;
 * allocate DMA buffers for data-in/data-out and copy the user's write data
 * in; borrow a cmd/pkt from the pool, build the pass-through, start it (or
 * queue it) and block on m_passthru_cv until CFLAG_FINISHED; then copy the
 * reply, sense and read data back out.  All cleanup funnels through "out:",
 * driven by the pt_flags resource bits.
 *
 * Returns 0 on success or an errno (EFAULT/EIO/ENOMEM/EAGAIN/ETIMEDOUT).
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint32_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t		pt;
	mptsas_dma_alloc_state_t	data_dma_state;
	mptsas_dma_alloc_state_t	dataout_dma_state;
	caddr_t				memp;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	uint32_t			reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t		request_hdrp;
	pMPI2RequestHeader_t		request_msg;
	pMPI2DefaultReply_t		reply_msg;
	Mpi2SCSIIOReply_t		rep_msg;
	int				rvalue;
	int				i, status = 0, pt_flags = 0, rv = 0;
	uint8_t				function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	NDBG27(("mptsas_do_passthru: mode 0x%x, size 0x%x, Func 0x%x",
	    mode, request_size, request_msg->Function));
	mutex_enter(&mpt->m_mutex);

	/*
	 * Task management is not a normal pass-through: hand it to the
	 * dedicated IOC task-management path and skip the cmd/DMA setup.
	 */
	function = request_msg->Function;
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		pMpi2SCSITaskManagementRequest_t	task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	/* Data-in buffer; for WRITE direction the user data is copied in. */
	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			/*
			 * NOTE(review): byte-at-a-time copyin looks
			 * inefficient — presumably historical; confirm
			 * before "optimizing", alignment may matter.
			 */
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	} else {
		bzero(&data_dma_state, sizeof (data_dma_state));
	}

	/* Data-out buffer (bidirectional requests); always copied in. */
	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	} else {
		bzero(&dataout_dma_state, sizeof (dataout_dma_state));
	}

	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.simple = 0;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;
	/* Fill in pt.sgl_offset (and pt.simple for CONFIG) per function. */
	mptsas_prep_sgl_offset(mpt, &pt);

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp		= (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp		= (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private	= (opaque_t)&pt;
	pkt->pkt_flags		= FLAG_HEAD;
	pkt->pkt_time		= timeout;
	cmd->cmd_pkt		= pkt;
	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * SCSI IO requests carry a real CDB and auto-request-sense space;
	 * reply_size beyond the fixed reply structure is sense space.
	 */
	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		uint8_t			com, cdb_group_id;
		boolean_t		ret;

		pkt->pkt_cdbp = ((pMpi2SCSIIORequest_t)request_msg)->CDB.CDB32;
		com = pkt->pkt_cdbp[0];
		cdb_group_id = CDB_GROUPID(com);
		switch (cdb_group_id) {
		case CDB_GROUPID_0: cmd->cmd_cdblen = CDB_GROUP0; break;
		case CDB_GROUPID_1: cmd->cmd_cdblen = CDB_GROUP1; break;
		case CDB_GROUPID_2: cmd->cmd_cdblen = CDB_GROUP2; break;
		case CDB_GROUPID_4: cmd->cmd_cdblen = CDB_GROUP4; break;
		case CDB_GROUPID_5: cmd->cmd_cdblen = CDB_GROUP5; break;
		default:
			NDBG27(("mptsas_do_passthru: SCSI_IO, reserved "
			    "CDBGROUP 0x%x requested!", cdb_group_id));
			break;
		}

		reply_len = sizeof (MPI2_SCSI_IO_REPLY);
		sense_len = reply_size - reply_len;
		ret = mptsas_cmdarqsize(mpt, cmd, sense_len, KM_SLEEP);
		VERIFY(ret == B_TRUE);
	} else {
		reply_len = reply_size;
		sense_len = 0;
	}

	NDBG27(("mptsas_do_passthru: %s, dsz 0x%x, dosz 0x%x, replen 0x%x, "
	    "snslen 0x%x",
	    (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE)?"Write":"Read",
	    data_size, dataout_size, reply_len, sense_len));

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block (m_mutex dropped by cv_wait) until completion is signalled. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	NDBG27(("mptsas_do_passthru: Cmd complete, flags 0x%x, rfm 0x%x "
	    "pktreason 0x%x", cmd->cmd_flags, cmd->cmd_rfm,
	    pkt->pkt_reason));

	/* Locate the request frame; only valid once the cmd held a slot. */
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
	}

	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	/* Copy reply, sense and read data back to the caller, unlocked. */
	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		function = request_hdrp->Function;
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			sense_len = cmd->cmd_extrqslen ?
			    min(sense_len, cmd->cmd_extrqslen) :
			    min(sense_len, cmd->cmd_rqslen);
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		/*
		 * NOTE(review): sense data is read from a fixed 64-byte
		 * offset into the request frame — presumably where the
		 * sense buffer is staged; confirm against the frame layout.
		 */
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i, 1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	if (cmd) {
		/* Release any extended request-sense slot taken above. */
		if (cmd->cmd_extrqslen != 0) {
			rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
			    cmd->cmd_extrqsidx + 1);
		}
		if (cmd->cmd_flags & CFLAG_PREPARED) {
			mptsas_remove_cmd(mpt, cmd);
			/* mptsas_remove_cmd() returns cmd to the pool. */
			pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
		}
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	/* A timed-out command leaves the IOC in an unknown state: reset it. */
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);
	NDBG27(("mptsas_do_passthru: Done status 0x%x", status));

	return (status);
}
11430 11426
11431 11427 static int
11432 11428 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
11433 11429 {
11434 11430 /*
11435 11431 * If timeout is 0, set timeout to default of 60 seconds.
11436 11432 */
11437 11433 if (data->Timeout == 0) {
11438 11434 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
11439 11435 }
11440 11436
11441 11437 if (((data->DataSize == 0) &&
11442 11438 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
11443 11439 ((data->DataSize != 0) &&
11444 11440 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
11445 11441 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
11446 11442 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
11447 11443 (data->DataOutSize != 0))))) {
11448 11444 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
11449 11445 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
11450 11446 } else {
11451 11447 data->DataOutSize = 0;
11452 11448 }
11453 11449 /*
11454 11450 * Send passthru request messages
11455 11451 */
11456 11452 return (mptsas_do_passthru(mpt,
11457 11453 (uint8_t *)((uintptr_t)data->PtrRequest),
11458 11454 (uint8_t *)((uintptr_t)data->PtrReply),
11459 11455 (uint8_t *)((uintptr_t)data->PtrData),
11460 11456 data->RequestSize, data->ReplySize,
11461 11457 data->DataSize, data->DataDirection,
11462 11458 (uint8_t *)((uintptr_t)data->PtrDataOut),
11463 11459 data->DataOutSize, data->Timeout, mode));
11464 11460 } else {
11465 11461 return (EINVAL);
11466 11462 }
11467 11463 }
11468 11464
11469 11465 static uint8_t
11470 11466 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
11471 11467 {
11472 11468 uint8_t index;
11473 11469
11474 11470 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
11475 11471 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
11476 11472 return (index);
11477 11473 }
11478 11474 }
11479 11475
11480 11476 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
11481 11477 }
11482 11478
/*
 * Build and fire a FW diagnostic request (buffer POST or RELEASE) into
 * the request frame belonging to cmd's slot, then ring the doorbell via
 * MPTSAS_START_CMD.  The mptsas_diag_request_t describing the operation
 * rides in pkt->pkt_ha_private.  Called with m_mutex held (asserted).
 * All frame stores go through the DDI access handle so device endianness
 * is handled by the framework.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
	struct scsi_pkt			*pkt = cmd->cmd_pkt;
	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
	uint32_t			i;
	uint64_t			request_desc;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		/* Copy the caller's product-specific words verbatim. */
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* 64-bit DMA address of the diag buffer, split low/high. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/* RELEASE only needs the function code and buffer type. */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	/* Request descriptor: SMID in bits 16+, default descriptor type. */
	request_desc = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc);
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
11555 11551
/*
 * Post a FW diagnostic buffer to the IOC: borrow a cmd/pkt from the pool,
 * issue a DIAG_BUFFER_POST via mptsas_start_diag(), block on m_fw_diag_cv
 * until the command finishes, then examine the reply's IOCStatus.  On
 * success the buffer is marked valid and firmware-owned.
 *
 * Returns DDI_SUCCESS/DDI_FAILURE; *return_code carries the
 * MPTSAS_FW_DIAG_ERROR_* detail for the ioctl caller.
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t		diag;
	int				status, slot_num, post_flags = 0;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	pMpi2DiagBufferPostReply_t	reply;
	uint16_t			iocstatus;
	uint32_t			iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* mptsas_start_diag() reads this via pkt_ha_private. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private	= (opaque_t)&diag;
	pkt->pkt_flags		= FLAG_HEAD;
	pkt->pkt_time		= 60;
	cmd->cmd_pkt		= pkt;
	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block (m_mutex dropped by cv_wait) until completion is signalled. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 *
	 * NOTE(review): if the command completes without a timeout and
	 * without an address reply (cmd_rfm == 0), "status" is returned
	 * uninitialized — verify whether that path can occur for
	 * DIAG_BUFFER_POST.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* mptsas_remove_cmd() also returns the cmd to the pool. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11706 11702
/*
 * Send a DIAG_RELEASE request to the IOC, asking the firmware to give up
 * ownership of the diagnostic buffer described by pBuffer.  diag_type says
 * whether this release is part of a RELEASE or an UNREGISTER action; on a
 * successful UNREGISTER the buffer's unique ID is invalidated here too.
 *
 * Returns DDI_SUCCESS with *return_code == MPTSAS_FW_DIAG_ERROR_SUCCESS on
 * success; DDI_FAILURE otherwise (with *return_code left at
 * MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED).
 *
 * NOTE(review): cv_wait() below sleeps on m_fw_diag_cv with m_mutex, so the
 * caller must hold m_mutex on entry -- confirm against all callers.
 * NOTE(review): if the command finishes without a timeout and without a
 * reply frame (cmd_rfm == 0), "status" is returned uninitialized -- confirm
 * whether the hardware guarantees an address reply for DIAG_RELEASE.
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
        mptsas_diag_request_t   diag;
        int                     status, slot_num, rel_flags = 0;
        mptsas_cmd_t            *cmd = NULL;
        struct scsi_pkt         *pkt;
        pMpi2DiagReleaseReply_t reply;
        uint16_t                iocstatus;
        uint32_t                iocloginfo;

        /*
         * If buffer is not enabled, just leave.
         */
        *return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
        if (!pBuffer->enabled) {
                mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
                    "by the IOC");
                status = DDI_FAILURE;
                goto out;
        }

        /*
         * Clear some flags initially.
         */
        pBuffer->force_release = FALSE;
        pBuffer->valid_data = FALSE;
        pBuffer->owned_by_firmware = FALSE;

        /*
         * Get a cmd buffer from the cmd buffer pool
         */
        if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
                status = DDI_FAILURE;
                mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
                    "Diag");
                goto out;
        }
        /* From here on, the cmd must be returned to the pool on exit. */
        rel_flags |= MPTSAS_REQUEST_POOL_CMD;

        bzero((caddr_t)cmd, sizeof (*cmd));
        bzero((caddr_t)pkt, scsi_pkt_size());

        cmd->ioc_cmd_slot = (uint32_t)(slot_num);

        /* Request context consumed by mptsas_start_diag() */
        diag.pBuffer = pBuffer;
        diag.function = MPI2_FUNCTION_DIAG_RELEASE;

        /*
         * Form a blank cmd/pkt to store the acknowledgement message
         */
        pkt->pkt_ha_private = (opaque_t)&diag;
        pkt->pkt_flags = FLAG_HEAD;
        pkt->pkt_time = 60;
        cmd->cmd_pkt = pkt;
        cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

        /*
         * Save the command in a slot
         */
        if (mptsas_save_cmd(mpt, cmd) == TRUE) {
                /*
                 * Once passthru command get slot, set cmd_flags
                 * CFLAG_PREPARED.
                 */
                cmd->cmd_flags |= CFLAG_PREPARED;
                mptsas_start_diag(mpt, cmd);
        } else {
                /* No free slot: queue it; it will start when a slot frees. */
                mptsas_waitq_add(mpt, cmd);
        }

        /* Wait for completion; the interrupt path signals m_fw_diag_cv. */
        while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
                cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
        }

        if (cmd->cmd_flags & CFLAG_TIMEOUT) {
                status = DDI_FAILURE;
                mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
                goto out;
        }

        /*
         * cmd_rfm points to the reply message if a reply was given.  Check the
         * IOCStatus to make sure everything went OK with the FW diag request
         * and set buffer flags.
         */
        if (cmd->cmd_rfm) {
                rel_flags |= MPTSAS_ADDRESS_REPLY;
                (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
                    DDI_DMA_SYNC_FORCPU);
                /*
                 * cmd_rfm holds the 32-bit bus address of the reply frame;
                 * convert it into a CPU pointer relative to the reply frame
                 * pool base.
                 */
                reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
                    (cmd->cmd_rfm -
                    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));

                /*
                 * Get the reply message data
                 */
                iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
                    &reply->IOCStatus);
                iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
                    &reply->IOCLogInfo);

                /*
                 * If release failed quit.
                 */
                if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
                    pBuffer->owned_by_firmware) {
                        status = DDI_FAILURE;
                        NDBG13(("release FW Diag Buffer failed: "
                            "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
                            iocloginfo));
                        goto out;
                }

                /*
                 * Release was successful.
                 */
                *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
                status = DDI_SUCCESS;

                /*
                 * If this was for an UNREGISTER diag type command, clear the
                 * unique ID.
                 */
                if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
                        pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
                }
        }

out:
        /*
         * Put the reply frame back on the free queue, increment the free
         * index, and write the new index to the free index register.  But only
         * if this reply is an ADDRESS reply.
         */
        if (rel_flags & MPTSAS_ADDRESS_REPLY) {
                ddi_put32(mpt->m_acc_free_queue_hdl,
                    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
                    cmd->cmd_rfm);
                (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
                    DDI_DMA_SYNC_FORDEV);
                /* Free queue index wraps at the queue depth. */
                if (++mpt->m_free_index == mpt->m_free_queue_depth) {
                        mpt->m_free_index = 0;
                }
                ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
                    mpt->m_free_index);
        }
        /*
         * mptsas_remove_cmd() both releases the slot and returns the cmd to
         * the pool, so the explicit return_to_pool below must be skipped.
         */
        if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
                mptsas_remove_cmd(mpt, cmd);
                rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
        }
        if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
                mptsas_return_to_pool(mpt, cmd);
        }

        return (status);
}
11866 11862
/*
 * Handle the FW diagnostic REGISTER action: allocate a DMA buffer of the
 * requested size for the given buffer type and post it to the IOC, tagging
 * it with the caller-supplied unique ID.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; a more detailed error goes back
 * through *return_code.  The caller must hold m_mutex (asserted below).
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
        mptsas_fw_diagnostic_buffer_t   *pBuffer;
        uint8_t                         extended_type, buffer_type, i;
        uint32_t                        buffer_size;
        uint32_t                        unique_id;
        int                             status;

        ASSERT(mutex_owned(&mpt->m_mutex));

        extended_type = diag_register->ExtendedType;
        buffer_type = diag_register->BufferType;
        buffer_size = diag_register->RequestedBufferSize;
        unique_id = diag_register->UniqueId;

        /*
         * Check for valid buffer type
         */
        if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
                *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
                return (DDI_FAILURE);
        }

        /*
         * Get the current buffer and look up the unique ID.  The unique ID
         * should not be found.  If it is, the ID is already in use.
         * Note the buffer itself is indexed by type (one buffer per type).
         */
        i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
        pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
        if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
                *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
                return (DDI_FAILURE);
        }

        /*
         * The buffer's unique ID should not be registered yet, and the given
         * unique ID cannot be 0.
         */
        if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
            (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
                *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
                return (DDI_FAILURE);
        }

        /*
         * If this buffer is already posted as immediate, just change owner.
         */
        if (pBuffer->immediate && pBuffer->owned_by_firmware &&
            (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
                pBuffer->immediate = FALSE;
                pBuffer->unique_id = unique_id;
                return (DDI_SUCCESS);
        }

        /*
         * Post a new buffer after checking if it's enabled.  The DMA buffer
         * that is allocated will be contiguous (sgl_len = 1).
         */
        if (!pBuffer->enabled) {
                *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
                return (DDI_FAILURE);
        }
        bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
        pBuffer->buffer_data.size = buffer_size;
        if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
                mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
                    "diag buffer: size = %d bytes", buffer_size);
                *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
                return (DDI_FAILURE);
        }

        /*
         * Copy the given info to the diag buffer and post the buffer.
         * NOTE(review): assumes diag_register->ProductSpecific holds at
         * least as many words as pBuffer->product_specific -- confirm the
         * two structure definitions agree.
         */
        pBuffer->buffer_type = buffer_type;
        pBuffer->immediate = FALSE;
        if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
                for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
                    i++) {
                        pBuffer->product_specific[i] =
                            diag_register->ProductSpecific[i];
                }
        }
        pBuffer->extended_type = extended_type;
        pBuffer->unique_id = unique_id;
        status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

        /* FMA: verify the DMA handle is still healthy after the post. */
        if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
            DDI_SUCCESS) {
                mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
                    "mptsas_diag_register.");
                ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
                status = DDI_FAILURE;
        }

        /*
         * In case there was a failure, free the DMA buffer.
         */
        if (status == DDI_FAILURE) {
                mptsas_dma_free(&pBuffer->buffer_data);
        }

        return (status);
}
11973 11969
11974 11970 static int
11975 11971 mptsas_diag_unregister(mptsas_t *mpt,
11976 11972 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
11977 11973 {
11978 11974 mptsas_fw_diagnostic_buffer_t *pBuffer;
11979 11975 uint8_t i;
11980 11976 uint32_t unique_id;
11981 11977 int status;
11982 11978
11983 11979 ASSERT(mutex_owned(&mpt->m_mutex));
11984 11980
11985 11981 unique_id = diag_unregister->UniqueId;
11986 11982
11987 11983 /*
11988 11984 * Get the current buffer and look up the unique ID. The unique ID
11989 11985 * should be there.
11990 11986 */
11991 11987 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11992 11988 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11993 11989 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11994 11990 return (DDI_FAILURE);
11995 11991 }
11996 11992
11997 11993 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11998 11994
11999 11995 /*
12000 11996 * Try to release the buffer from FW before freeing it. If release
12001 11997 * fails, don't free the DMA buffer in case FW tries to access it
12002 11998 * later. If buffer is not owned by firmware, can't release it.
12003 11999 */
12004 12000 if (!pBuffer->owned_by_firmware) {
12005 12001 status = DDI_SUCCESS;
12006 12002 } else {
12007 12003 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
12008 12004 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
12009 12005 }
12010 12006
12011 12007 /*
12012 12008 * At this point, return the current status no matter what happens with
12013 12009 * the DMA buffer.
12014 12010 */
12015 12011 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
12016 12012 if (status == DDI_SUCCESS) {
12017 12013 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
12018 12014 DDI_SUCCESS) {
12019 12015 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
12020 12016 "in mptsas_diag_unregister.");
12021 12017 ddi_fm_service_impact(mpt->m_dip,
12022 12018 DDI_SERVICE_UNAFFECTED);
12023 12019 }
12024 12020 mptsas_dma_free(&pBuffer->buffer_data);
12025 12021 }
12026 12022
12027 12023 return (status);
12028 12024 }
12029 12025
12030 12026 static int
12031 12027 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
12032 12028 uint32_t *return_code)
12033 12029 {
12034 12030 mptsas_fw_diagnostic_buffer_t *pBuffer;
12035 12031 uint8_t i;
12036 12032 uint32_t unique_id;
12037 12033
12038 12034 ASSERT(mutex_owned(&mpt->m_mutex));
12039 12035
12040 12036 unique_id = diag_query->UniqueId;
12041 12037
12042 12038 /*
12043 12039 * If ID is valid, query on ID.
12044 12040 * If ID is invalid, query on buffer type.
12045 12041 */
12046 12042 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
12047 12043 i = diag_query->BufferType;
12048 12044 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
12049 12045 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12050 12046 return (DDI_FAILURE);
12051 12047 }
12052 12048 } else {
12053 12049 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12054 12050 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12055 12051 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12056 12052 return (DDI_FAILURE);
12057 12053 }
12058 12054 }
12059 12055
12060 12056 /*
12061 12057 * Fill query structure with the diag buffer info.
12062 12058 */
12063 12059 pBuffer = &mpt->m_fw_diag_buffer_list[i];
12064 12060 diag_query->BufferType = pBuffer->buffer_type;
12065 12061 diag_query->ExtendedType = pBuffer->extended_type;
12066 12062 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
12067 12063 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
12068 12064 i++) {
12069 12065 diag_query->ProductSpecific[i] =
12070 12066 pBuffer->product_specific[i];
12071 12067 }
12072 12068 }
12073 12069 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
12074 12070 diag_query->DriverAddedBufferSize = 0;
12075 12071 diag_query->UniqueId = pBuffer->unique_id;
12076 12072 diag_query->ApplicationFlags = 0;
12077 12073 diag_query->DiagnosticFlags = 0;
12078 12074
12079 12075 /*
12080 12076 * Set/Clear application flags
12081 12077 */
12082 12078 if (pBuffer->immediate) {
12083 12079 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
12084 12080 } else {
12085 12081 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
12086 12082 }
12087 12083 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
12088 12084 diag_query->ApplicationFlags |=
12089 12085 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
12090 12086 } else {
12091 12087 diag_query->ApplicationFlags &=
12092 12088 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
12093 12089 }
12094 12090 if (pBuffer->owned_by_firmware) {
12095 12091 diag_query->ApplicationFlags |=
12096 12092 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
12097 12093 } else {
12098 12094 diag_query->ApplicationFlags &=
12099 12095 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
12100 12096 }
12101 12097
12102 12098 return (DDI_SUCCESS);
12103 12099 }
12104 12100
12105 12101 static int
12106 12102 mptsas_diag_read_buffer(mptsas_t *mpt,
12107 12103 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
12108 12104 uint32_t *return_code, int ioctl_mode)
12109 12105 {
12110 12106 mptsas_fw_diagnostic_buffer_t *pBuffer;
12111 12107 uint8_t i, *pData;
12112 12108 uint32_t unique_id, byte;
12113 12109 int status;
12114 12110
12115 12111 ASSERT(mutex_owned(&mpt->m_mutex));
12116 12112
12117 12113 unique_id = diag_read_buffer->UniqueId;
12118 12114
12119 12115 /*
12120 12116 * Get the current buffer and look up the unique ID. The unique ID
12121 12117 * should be there.
12122 12118 */
12123 12119 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12124 12120 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12125 12121 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12126 12122 return (DDI_FAILURE);
12127 12123 }
12128 12124
12129 12125 pBuffer = &mpt->m_fw_diag_buffer_list[i];
12130 12126
12131 12127 /*
12132 12128 * Make sure requested read is within limits
12133 12129 */
12134 12130 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
12135 12131 pBuffer->buffer_data.size) {
12136 12132 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12137 12133 return (DDI_FAILURE);
12138 12134 }
12139 12135
12140 12136 /*
12141 12137 * Copy the requested data from DMA to the diag_read_buffer. The DMA
12142 12138 * buffer that was allocated is one contiguous buffer.
12143 12139 */
12144 12140 pData = (uint8_t *)(pBuffer->buffer_data.memp +
12145 12141 diag_read_buffer->StartingOffset);
12146 12142 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
12147 12143 DDI_DMA_SYNC_FORCPU);
12148 12144 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
12149 12145 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
12150 12146 != 0) {
12151 12147 return (DDI_FAILURE);
12152 12148 }
12153 12149 }
12154 12150 diag_read_buffer->Status = 0;
12155 12151
12156 12152 /*
12157 12153 * Set or clear the Force Release flag.
12158 12154 */
12159 12155 if (pBuffer->force_release) {
12160 12156 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
12161 12157 } else {
12162 12158 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
12163 12159 }
12164 12160
12165 12161 /*
12166 12162 * If buffer is to be reregistered, make sure it's not already owned by
12167 12163 * firmware first.
12168 12164 */
12169 12165 status = DDI_SUCCESS;
12170 12166 if (!pBuffer->owned_by_firmware) {
12171 12167 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
12172 12168 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
12173 12169 return_code);
12174 12170 }
12175 12171 }
12176 12172
12177 12173 return (status);
12178 12174 }
12179 12175
12180 12176 static int
12181 12177 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
12182 12178 uint32_t *return_code)
12183 12179 {
12184 12180 mptsas_fw_diagnostic_buffer_t *pBuffer;
12185 12181 uint8_t i;
12186 12182 uint32_t unique_id;
12187 12183 int status;
12188 12184
12189 12185 ASSERT(mutex_owned(&mpt->m_mutex));
12190 12186
12191 12187 unique_id = diag_release->UniqueId;
12192 12188
12193 12189 /*
12194 12190 * Get the current buffer and look up the unique ID. The unique ID
12195 12191 * should be there.
12196 12192 */
12197 12193 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12198 12194 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12199 12195 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12200 12196 return (DDI_FAILURE);
12201 12197 }
12202 12198
12203 12199 pBuffer = &mpt->m_fw_diag_buffer_list[i];
12204 12200
12205 12201 /*
12206 12202 * If buffer is not owned by firmware, it's already been released.
12207 12203 */
12208 12204 if (!pBuffer->owned_by_firmware) {
12209 12205 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
12210 12206 return (DDI_FAILURE);
12211 12207 }
12212 12208
12213 12209 /*
12214 12210 * Release the buffer.
12215 12211 */
12216 12212 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
12217 12213 MPTSAS_FW_DIAG_TYPE_RELEASE);
12218 12214 return (status);
12219 12215 }
12220 12216
12221 12217 static int
12222 12218 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
12223 12219 uint32_t length, uint32_t *return_code, int ioctl_mode)
12224 12220 {
12225 12221 mptsas_fw_diag_register_t diag_register;
12226 12222 mptsas_fw_diag_unregister_t diag_unregister;
12227 12223 mptsas_fw_diag_query_t diag_query;
12228 12224 mptsas_diag_read_buffer_t diag_read_buffer;
12229 12225 mptsas_fw_diag_release_t diag_release;
12230 12226 int status = DDI_SUCCESS;
12231 12227 uint32_t original_return_code, read_buf_len;
12232 12228
12233 12229 ASSERT(mutex_owned(&mpt->m_mutex));
12234 12230
12235 12231 original_return_code = *return_code;
12236 12232 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
12237 12233
12238 12234 switch (action) {
12239 12235 case MPTSAS_FW_DIAG_TYPE_REGISTER:
12240 12236 if (!length) {
12241 12237 *return_code =
12242 12238 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12243 12239 status = DDI_FAILURE;
12244 12240 break;
12245 12241 }
12246 12242 if (ddi_copyin(diag_action, &diag_register,
12247 12243 sizeof (diag_register), ioctl_mode) != 0) {
12248 12244 return (DDI_FAILURE);
12249 12245 }
12250 12246 status = mptsas_diag_register(mpt, &diag_register,
12251 12247 return_code);
12252 12248 break;
12253 12249
12254 12250 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
12255 12251 if (length < sizeof (diag_unregister)) {
12256 12252 *return_code =
12257 12253 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12258 12254 status = DDI_FAILURE;
12259 12255 break;
12260 12256 }
12261 12257 if (ddi_copyin(diag_action, &diag_unregister,
12262 12258 sizeof (diag_unregister), ioctl_mode) != 0) {
12263 12259 return (DDI_FAILURE);
12264 12260 }
12265 12261 status = mptsas_diag_unregister(mpt, &diag_unregister,
12266 12262 return_code);
12267 12263 break;
12268 12264
12269 12265 case MPTSAS_FW_DIAG_TYPE_QUERY:
12270 12266 if (length < sizeof (diag_query)) {
12271 12267 *return_code =
12272 12268 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12273 12269 status = DDI_FAILURE;
12274 12270 break;
12275 12271 }
12276 12272 if (ddi_copyin(diag_action, &diag_query,
12277 12273 sizeof (diag_query), ioctl_mode) != 0) {
12278 12274 return (DDI_FAILURE);
12279 12275 }
12280 12276 status = mptsas_diag_query(mpt, &diag_query,
12281 12277 return_code);
12282 12278 if (status == DDI_SUCCESS) {
12283 12279 if (ddi_copyout(&diag_query, diag_action,
12284 12280 sizeof (diag_query), ioctl_mode) != 0) {
12285 12281 return (DDI_FAILURE);
12286 12282 }
12287 12283 }
12288 12284 break;
12289 12285
12290 12286 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
12291 12287 if (ddi_copyin(diag_action, &diag_read_buffer,
12292 12288 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
12293 12289 return (DDI_FAILURE);
12294 12290 }
12295 12291 read_buf_len = sizeof (diag_read_buffer) -
12296 12292 sizeof (diag_read_buffer.DataBuffer) +
12297 12293 diag_read_buffer.BytesToRead;
12298 12294 if (length < read_buf_len) {
12299 12295 *return_code =
12300 12296 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12301 12297 status = DDI_FAILURE;
12302 12298 break;
12303 12299 }
12304 12300 status = mptsas_diag_read_buffer(mpt,
12305 12301 &diag_read_buffer, diag_action +
12306 12302 sizeof (diag_read_buffer) - 4, return_code,
12307 12303 ioctl_mode);
12308 12304 if (status == DDI_SUCCESS) {
12309 12305 if (ddi_copyout(&diag_read_buffer, diag_action,
12310 12306 sizeof (diag_read_buffer) - 4, ioctl_mode)
12311 12307 != 0) {
12312 12308 return (DDI_FAILURE);
12313 12309 }
12314 12310 }
12315 12311 break;
12316 12312
12317 12313 case MPTSAS_FW_DIAG_TYPE_RELEASE:
12318 12314 if (length < sizeof (diag_release)) {
12319 12315 *return_code =
12320 12316 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12321 12317 status = DDI_FAILURE;
12322 12318 break;
12323 12319 }
12324 12320 if (ddi_copyin(diag_action, &diag_release,
12325 12321 sizeof (diag_release), ioctl_mode) != 0) {
12326 12322 return (DDI_FAILURE);
12327 12323 }
12328 12324 status = mptsas_diag_release(mpt, &diag_release,
12329 12325 return_code);
12330 12326 break;
12331 12327
12332 12328 default:
12333 12329 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12334 12330 status = DDI_FAILURE;
12335 12331 break;
12336 12332 }
12337 12333
12338 12334 if ((status == DDI_FAILURE) &&
12339 12335 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
12340 12336 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
12341 12337 status = DDI_SUCCESS;
12342 12338 }
12343 12339
12344 12340 return (status);
12345 12341 }
12346 12342
12347 12343 static int
12348 12344 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
12349 12345 {
12350 12346 int status;
12351 12347 mptsas_diag_action_t driver_data;
12352 12348
12353 12349 ASSERT(mutex_owned(&mpt->m_mutex));
12354 12350
12355 12351 /*
12356 12352 * Copy the user data to a driver data buffer.
12357 12353 */
12358 12354 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
12359 12355 mode) == 0) {
12360 12356 /*
12361 12357 * Send diag action request if Action is valid
12362 12358 */
12363 12359 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
12364 12360 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
12365 12361 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
12366 12362 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
12367 12363 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
12368 12364 status = mptsas_do_diag_action(mpt, driver_data.Action,
12369 12365 (void *)(uintptr_t)driver_data.PtrDiagAction,
12370 12366 driver_data.Length, &driver_data.ReturnCode,
12371 12367 mode);
12372 12368 if (status == DDI_SUCCESS) {
12373 12369 if (ddi_copyout(&driver_data.ReturnCode,
12374 12370 &user_data->ReturnCode,
12375 12371 sizeof (user_data->ReturnCode), mode)
12376 12372 != 0) {
12377 12373 status = EFAULT;
12378 12374 } else {
12379 12375 status = 0;
12380 12376 }
12381 12377 } else {
12382 12378 status = EIO;
12383 12379 }
12384 12380 } else {
12385 12381 status = EINVAL;
12386 12382 }
12387 12383 } else {
12388 12384 status = EFAULT;
12389 12385 }
12390 12386
12391 12387 return (status);
12392 12388 }
12393 12389
12394 12390 /*
12395 12391 * This routine handles the "event query" ioctl.
12396 12392 */
12397 12393 static int
12398 12394 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
12399 12395 int *rval)
12400 12396 {
12401 12397 int status;
12402 12398 mptsas_event_query_t driverdata;
12403 12399 uint8_t i;
12404 12400
12405 12401 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
12406 12402
12407 12403 mutex_enter(&mpt->m_mutex);
12408 12404 for (i = 0; i < 4; i++) {
12409 12405 driverdata.Types[i] = mpt->m_event_mask[i];
12410 12406 }
12411 12407 mutex_exit(&mpt->m_mutex);
12412 12408
12413 12409 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
12414 12410 status = EFAULT;
12415 12411 } else {
12416 12412 *rval = MPTIOCTL_STATUS_GOOD;
12417 12413 status = 0;
12418 12414 }
12419 12415
12420 12416 return (status);
12421 12417 }
12422 12418
12423 12419 /*
12424 12420 * This routine handles the "event enable" ioctl.
12425 12421 */
12426 12422 static int
12427 12423 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
12428 12424 int *rval)
12429 12425 {
12430 12426 int status;
12431 12427 mptsas_event_enable_t driverdata;
12432 12428 uint8_t i;
12433 12429
12434 12430 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12435 12431 mutex_enter(&mpt->m_mutex);
12436 12432 for (i = 0; i < 4; i++) {
12437 12433 mpt->m_event_mask[i] = driverdata.Types[i];
12438 12434 }
12439 12435 mutex_exit(&mpt->m_mutex);
12440 12436
12441 12437 *rval = MPTIOCTL_STATUS_GOOD;
12442 12438 status = 0;
12443 12439 } else {
12444 12440 status = EFAULT;
12445 12441 }
12446 12442 return (status);
12447 12443 }
12448 12444
12449 12445 /*
12450 12446 * This routine handles the "event report" ioctl.
12451 12447 */
12452 12448 static int
12453 12449 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
12454 12450 int *rval)
12455 12451 {
12456 12452 int status;
12457 12453 mptsas_event_report_t driverdata;
12458 12454
12459 12455 mutex_enter(&mpt->m_mutex);
12460 12456
12461 12457 if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
12462 12458 mode) == 0) {
12463 12459 if (driverdata.Size >= sizeof (mpt->m_events)) {
12464 12460 if (ddi_copyout(mpt->m_events, data->Events,
12465 12461 sizeof (mpt->m_events), mode) != 0) {
12466 12462 status = EFAULT;
12467 12463 } else {
12468 12464 if (driverdata.Size > sizeof (mpt->m_events)) {
12469 12465 driverdata.Size =
12470 12466 sizeof (mpt->m_events);
12471 12467 if (ddi_copyout(&driverdata.Size,
12472 12468 &data->Size,
12473 12469 sizeof (driverdata.Size),
12474 12470 mode) != 0) {
12475 12471 status = EFAULT;
12476 12472 } else {
12477 12473 *rval = MPTIOCTL_STATUS_GOOD;
12478 12474 status = 0;
12479 12475 }
12480 12476 } else {
12481 12477 *rval = MPTIOCTL_STATUS_GOOD;
12482 12478 status = 0;
12483 12479 }
12484 12480 }
12485 12481 } else {
12486 12482 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
12487 12483 status = 0;
12488 12484 }
12489 12485 } else {
12490 12486 status = EFAULT;
12491 12487 }
12492 12488
12493 12489 mutex_exit(&mpt->m_mutex);
12494 12490 return (status);
12495 12491 }
12496 12492
12497 12493 static void
12498 12494 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12499 12495 {
12500 12496 int *reg_data;
12501 12497 uint_t reglen;
12502 12498
12503 12499 /*
12504 12500 * Lookup the 'reg' property and extract the other data
12505 12501 */
12506 12502 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12507 12503 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
12508 12504 DDI_PROP_SUCCESS) {
12509 12505 /*
12510 12506 * Extract the PCI data from the 'reg' property first DWORD.
12511 12507 * The entry looks like the following:
12512 12508 * First DWORD:
12513 12509 * Bits 0 - 7 8-bit Register number
12514 12510 * Bits 8 - 10 3-bit Function number
12515 12511 * Bits 11 - 15 5-bit Device number
12516 12512 * Bits 16 - 23 8-bit Bus number
12517 12513 * Bits 24 - 25 2-bit Address Space type identifier
12518 12514 *
12519 12515 */
12520 12516 adapter_data->PciInformation.u.bits.BusNumber =
12521 12517 (reg_data[0] & 0x00FF0000) >> 16;
12522 12518 adapter_data->PciInformation.u.bits.DeviceNumber =
12523 12519 (reg_data[0] & 0x0000F800) >> 11;
12524 12520 adapter_data->PciInformation.u.bits.FunctionNumber =
12525 12521 (reg_data[0] & 0x00000700) >> 8;
12526 12522 ddi_prop_free((void *)reg_data);
12527 12523 } else {
12528 12524 /*
12529 12525 * If we can't determine the PCI data then we fill in FF's for
12530 12526 * the data to indicate this.
12531 12527 */
12532 12528 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
12533 12529 adapter_data->MpiPortNumber = 0xFFFFFFFF;
12534 12530 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
12535 12531 }
12536 12532
12537 12533 /*
12538 12534 * Saved in the mpt->m_fwversion
12539 12535 */
12540 12536 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
12541 12537 }
12542 12538
12543 12539 static void
12544 12540 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12545 12541 {
12546 12542 char *driver_verstr = MPTSAS_MOD_STRING;
12547 12543
12548 12544 mptsas_lookup_pci_data(mpt, adapter_data);
12549 12545 adapter_data->AdapterType = mpt->m_MPI25 ?
12550 12546 MPTIOCTL_ADAPTER_TYPE_SAS3 :
12551 12547 MPTIOCTL_ADAPTER_TYPE_SAS2;
12552 12548 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
12553 12549 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
12554 12550 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
12555 12551 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
12556 12552 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
12557 12553 adapter_data->BiosVersion = 0;
12558 12554 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
12559 12555 }
12560 12556
12561 12557 static void
12562 12558 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
12563 12559 {
12564 12560 int *reg_data, i;
12565 12561 uint_t reglen;
12566 12562
12567 12563 /*
12568 12564 * Lookup the 'reg' property and extract the other data
12569 12565 */
12570 12566 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12571 12567 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
12572 12568 DDI_PROP_SUCCESS) {
12573 12569 /*
12574 12570 * Extract the PCI data from the 'reg' property first DWORD.
12575 12571 * The entry looks like the following:
12576 12572 * First DWORD:
12577 12573 * Bits 8 - 10 3-bit Function number
12578 12574 * Bits 11 - 15 5-bit Device number
12579 12575 * Bits 16 - 23 8-bit Bus number
12580 12576 */
12581 12577 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
12582 12578 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
12583 12579 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
12584 12580 ddi_prop_free((void *)reg_data);
12585 12581 } else {
12586 12582 /*
12587 12583 * If we can't determine the PCI info then we fill in FF's for
12588 12584 * the data to indicate this.
12589 12585 */
12590 12586 pci_info->BusNumber = 0xFFFFFFFF;
12591 12587 pci_info->DeviceNumber = 0xFF;
12592 12588 pci_info->FunctionNumber = 0xFF;
12593 12589 }
12594 12590
12595 12591 /*
12596 12592 * Now get the interrupt vector and the pci header. The vector can
12597 12593 * only be 0 right now. The header is the first 256 bytes of config
12598 12594 * space.
12599 12595 */
12600 12596 pci_info->InterruptVector = 0;
12601 12597 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
12602 12598 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
12603 12599 i);
12604 12600 }
12605 12601 }
12606 12602
12607 12603 static int
12608 12604 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
12609 12605 {
12610 12606 int status = 0;
12611 12607 mptsas_reg_access_t driverdata;
12612 12608
12613 12609 mutex_enter(&mpt->m_mutex);
12614 12610 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12615 12611 switch (driverdata.Command) {
12616 12612 /*
12617 12613 * IO access is not supported.
12618 12614 */
12619 12615 case REG_IO_READ:
12620 12616 case REG_IO_WRITE:
12621 12617 mptsas_log(mpt, CE_WARN, "IO access is not "
12622 12618 "supported. Use memory access.");
12623 12619 status = EINVAL;
12624 12620 break;
12625 12621
12626 12622 case REG_MEM_READ:
12627 12623 driverdata.RegData = ddi_get32(mpt->m_datap,
12628 12624 (uint32_t *)(void *)mpt->m_reg +
12629 12625 driverdata.RegOffset);
12630 12626 if (ddi_copyout(&driverdata.RegData,
12631 12627 &data->RegData,
12632 12628 sizeof (driverdata.RegData), mode) != 0) {
12633 12629 mptsas_log(mpt, CE_WARN, "Register "
12634 12630 "Read Failed");
12635 12631 status = EFAULT;
12636 12632 }
12637 12633 break;
12638 12634
12639 12635 case REG_MEM_WRITE:
12640 12636 ddi_put32(mpt->m_datap,
12641 12637 (uint32_t *)(void *)mpt->m_reg +
12642 12638 driverdata.RegOffset,
12643 12639 driverdata.RegData);
12644 12640 break;
12645 12641
12646 12642 default:
12647 12643 status = EINVAL;
12648 12644 break;
12649 12645 }
12650 12646 } else {
12651 12647 status = EFAULT;
12652 12648 }
12653 12649
12654 12650 mutex_exit(&mpt->m_mutex);
12655 12651 return (status);
12656 12652 }
12657 12653
12658 12654 static int
12659 12655 led_control(mptsas_t *mpt, intptr_t data, int mode)
12660 12656 {
12661 12657 int ret = 0;
12662 12658 mptsas_led_control_t lc;
12663 12659 mptsas_enclosure_t *mep;
12664 12660 uint16_t slotidx;
12665 12661
12666 12662 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
12667 12663 return (EFAULT);
12668 12664 }
12669 12665
12670 12666 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
12671 12667 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
12672 12668 lc.Led < MPTSAS_LEDCTL_LED_MIN ||
12673 12669 lc.Led > MPTSAS_LEDCTL_LED_MAX ||
12674 12670 (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
12675 12671 lc.LedStatus != 1)) {
12676 12672 return (EINVAL);
12677 12673 }
12678 12674
12679 12675 if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
12680 12676 (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
12681 12677 return (EACCES);
12682 12678
12683 12679 /* Locate the required enclosure */
12684 12680 mutex_enter(&mpt->m_mutex);
12685 12681 mep = mptsas_enc_lookup(mpt, lc.Enclosure);
12686 12682 if (mep == NULL) {
12687 12683 mutex_exit(&mpt->m_mutex);
12688 12684 return (ENOENT);
12689 12685 }
12690 12686
12691 12687 if (lc.Slot < mep->me_fslot) {
12692 12688 mutex_exit(&mpt->m_mutex);
12693 12689 return (ENOENT);
12694 12690 }
12695 12691
12696 12692 /*
12697 12693 * Slots on the enclosure are maintained in array where me_fslot is
12698 12694 * entry zero. We normalize the requested slot.
12699 12695 */
12700 12696 slotidx = lc.Slot - mep->me_fslot;
12701 12697 if (slotidx >= mep->me_nslots) {
12702 12698 mutex_exit(&mpt->m_mutex);
12703 12699 return (ENOENT);
12704 12700 }
12705 12701
12706 12702 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
12707 12703 /* Update our internal LED state. */
12708 12704 mep->me_slotleds[slotidx] &= ~(1 << (lc.Led - 1));
12709 12705 mep->me_slotleds[slotidx] |= lc.LedStatus << (lc.Led - 1);
12710 12706
12711 12707 /* Flush it to the controller. */
12712 12708 ret = mptsas_flush_led_status(mpt, mep, slotidx);
12713 12709 mutex_exit(&mpt->m_mutex);
12714 12710 return (ret);
12715 12711 }
12716 12712
12717 12713 /* Return our internal LED state. */
12718 12714 lc.LedStatus = (mep->me_slotleds[slotidx] >> (lc.Led - 1)) & 1;
12719 12715 mutex_exit(&mpt->m_mutex);
12720 12716
12721 12717 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12722 12718 return (EFAULT);
12723 12719 }
12724 12720
12725 12721 return (0);
12726 12722 }
12727 12723
/*
 * MPTIOCTL_GET_DISK_INFO handler: report the number of known targets
 * and, if the caller supplied a sufficiently large buffer, one
 * mptsas_disk_info_t (instance/enclosure/slot/SAS WWN) per target.
 * Uses the STRUCT_*() model macros so both ILP32 and LP64 callers work.
 * Returns 0, EACCES, EFAULT, ENOSPC or EAGAIN.
 */
static int
get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
{
	uint16_t i = 0;
	uint16_t count = 0;
	int ret = 0;
	mptsas_target_t *ptgt;
	mptsas_disk_info_t *di;
	STRUCT_DECL(mptsas_get_disk_info, gdi);

	if ((mode & FREAD) == 0)
		return (EACCES);

	STRUCT_INIT(gdi, get_udatamodel());

	if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
	    mode) != 0) {
		return (EFAULT);
	}

	/* Find out how many targets there are. */
	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		count++;
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * If we haven't been asked to copy out information on each target,
	 * then just return the count.
	 */
	STRUCT_FSET(gdi, DiskCount, count);
	if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
		goto copy_out;

	/*
	 * If we haven't been given a large enough buffer to copy out into,
	 * let the caller know.
	 */
	if (STRUCT_FGET(gdi, DiskInfoArraySize) <
	    count * sizeof (mptsas_disk_info_t)) {
		ret = ENOSPC;
		goto copy_out;
	}

	di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);

	/*
	 * The mutex was dropped for the allocation above, so the target
	 * set may have changed; re-walk it and bail with EAGAIN if it
	 * grew past the count we sized the buffer for.
	 */
	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		if (i >= count) {
			/*
			 * The number of targets changed while we weren't
			 * looking, so give up.
			 */
			refhash_rele(mpt->m_targets, ptgt);
			mutex_exit(&mpt->m_mutex);
			kmem_free(di, count * sizeof (mptsas_disk_info_t));
			return (EAGAIN);
		}
		di[i].Instance = mpt->m_instance;
		di[i].Enclosure = ptgt->m_enclosure;
		di[i].Slot = ptgt->m_slot_num;
		di[i].SasAddress = ptgt->m_addr.mta_wwn;
		i++;
	}
	mutex_exit(&mpt->m_mutex);
	/* i may be smaller than count if targets disappeared; report i. */
	STRUCT_FSET(gdi, DiskCount, i);

	/* Copy out the disk information to the caller. */
	if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
	    i * sizeof (mptsas_disk_info_t), mode) != 0) {
		ret = EFAULT;
	}

	kmem_free(di, count * sizeof (mptsas_disk_info_t));

copy_out:
	if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
	    mode) != 0) {
		ret = EFAULT;
	}

	return (ret);
}
12814 12810
/*
 * Character-device ioctl entry point, shared between the controller
 * node and the per-phymask iport nodes.  If the soft-state lookup by
 * minor number fails, the call came through an iport node and most
 * commands are forwarded to scsi_hba_ioctl().
 *
 * Returns 0 or an errno; for the MPT-specific commands *rval carries an
 * MPTIOCTL_STATUS_* code back to the caller as well.
 */
static int
mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
    int *rval)
{
	int status = 0;
	mptsas_t *mpt;
	mptsas_update_flash_t flashdata;
	mptsas_pass_thru_t passthru_data;
	mptsas_adapter_data_t adapter_data;
	mptsas_pci_info_t pci_info;
	int copylen;

	int iport_flag = 0;
	dev_info_t *dip = NULL;
	mptsas_phymask_t phymask = 0;
	struct devctl_iocdata *dcp = NULL;
	char *addr = NULL;
	mptsas_target_t *ptgt = NULL;

	*rval = MPTIOCTL_STATUS_GOOD;
	/* All of these operations require sys_config privilege. */
	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
		return (EPERM);
	}

	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
	if (mpt == NULL) {
		/*
		 * Called from iport node, get the states
		 */
		iport_flag = 1;
		dip = mptsas_get_dip_from_dev(dev, &phymask);
		if (dip == NULL) {
			return (ENXIO);
		}
		mpt = DIP2MPT(dip);
	}
	/* Make sure power level is D0 before accessing registers */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_options & MPTSAS_OPT_PM) {
		/*
		 * NOTE(review): pm_busy_component() is taken here, but no
		 * matching pm_idle_component() is visible on the normal
		 * return path ("out:" below) -- only on the raise-power
		 * failure path.  Confirm the busy count is released
		 * elsewhere.
		 */
		(void) pm_busy_component(mpt->m_dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			mutex_exit(&mpt->m_mutex);
			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: mptsas_ioctl: Raise power "
				    "request failed.", mpt->m_instance);
				(void) pm_idle_component(mpt->m_dip, 0);
				return (ENXIO);
			}
		} else {
			mutex_exit(&mpt->m_mutex);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	if (iport_flag) {
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
		if (status != 0) {
			goto out;
		}
		/*
		 * The following code control the OK2RM LED, it doesn't affect
		 * the ioctl return status.
		 */
		if ((cmd == DEVCTL_DEVICE_ONLINE) ||
		    (cmd == DEVCTL_DEVICE_OFFLINE)) {
			if (ndi_dc_allochdl((void *)data, &dcp) !=
			    NDI_SUCCESS) {
				goto out;
			}
			addr = ndi_dc_getaddr(dcp);
			ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
			if (ptgt == NULL) {
				NDBG14(("mptsas_ioctl led control: tgt %s not "
				    "found", addr));
				ndi_dc_freehdl(dcp);
				goto out;
			}
			/*
			 * NOTE(review): ptgt is only used for the lookup /
			 * debug message here; no LED update is issued in
			 * this block.  Verify against the comment above.
			 */
			ndi_dc_freehdl(dcp);
		}
		goto out;
	}
	switch (cmd) {
	case MPTIOCTL_GET_DISK_INFO:
		status = get_disk_info(mpt, data, mode);
		break;
	case MPTIOCTL_LED_CONTROL:
		status = led_control(mpt, data, mode);
		break;
	case MPTIOCTL_UPDATE_FLASH:
		if (ddi_copyin((void *)data, &flashdata,
		    sizeof (struct mptsas_update_flash), mode)) {
			status = EFAULT;
			break;
		}

		mutex_enter(&mpt->m_mutex);
		if (mptsas_update_flash(mpt,
		    (caddr_t)(long)flashdata.PtrBuffer,
		    flashdata.ImageSize, flashdata.ImageType, mode)) {
			status = EFAULT;
		}

		/*
		 * Reset the chip to start using the new
		 * firmware.  Reset if failed also.
		 */
		mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
		if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
			status = EFAULT;
		}
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_PASS_THRU:
		/*
		 * The user has requested to pass through a command to
		 * be executed by the MPT firmware.  Call our routine
		 * which does this.  Only allow one passthru IOCTL at
		 * one time. Other threads will block on
		 * m_passthru_mutex, which is of adaptive variant.
		 */
		if (ddi_copyin((void *)data, &passthru_data,
		    sizeof (mptsas_pass_thru_t), mode)) {
			status = EFAULT;
			break;
		}
		mutex_enter(&mpt->m_passthru_mutex);
		mutex_enter(&mpt->m_mutex);
		status = mptsas_pass_thru(mpt, &passthru_data, mode);
		mutex_exit(&mpt->m_mutex);
		mutex_exit(&mpt->m_passthru_mutex);

		break;
	case MPTIOCTL_GET_ADAPTER_DATA:
		/*
		 * The user has requested to read adapter data.  Call
		 * our routine which does this.
		 */
		bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
		if (ddi_copyin((void *)data, (void *)&adapter_data,
		    sizeof (mptsas_adapter_data_t), mode)) {
			status = EFAULT;
			break;
		}
		if (adapter_data.StructureLength >=
		    sizeof (mptsas_adapter_data_t)) {
			adapter_data.StructureLength = (uint32_t)
			    sizeof (mptsas_adapter_data_t);
			copylen = sizeof (mptsas_adapter_data_t);
			mutex_enter(&mpt->m_mutex);
			mptsas_read_adapter_data(mpt, &adapter_data);
			mutex_exit(&mpt->m_mutex);
		} else {
			/*
			 * Caller's buffer is too small: report the needed
			 * length and only copy out the length field.
			 */
			adapter_data.StructureLength = (uint32_t)
			    sizeof (mptsas_adapter_data_t);
			copylen = sizeof (adapter_data.StructureLength);
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
		}
		if (ddi_copyout((void *)(&adapter_data), (void *)data,
		    copylen, mode) != 0) {
			status = EFAULT;
		}
		break;
	case MPTIOCTL_GET_PCI_INFO:
		/*
		 * The user has requested to read pci info.  Call
		 * our routine which does this.
		 */
		bzero(&pci_info, sizeof (mptsas_pci_info_t));
		mutex_enter(&mpt->m_mutex);
		mptsas_read_pci_info(mpt, &pci_info);
		mutex_exit(&mpt->m_mutex);
		if (ddi_copyout((void *)(&pci_info), (void *)data,
		    sizeof (mptsas_pci_info_t), mode) != 0) {
			status = EFAULT;
		}
		break;
	case MPTIOCTL_RESET_ADAPTER:
		mutex_enter(&mpt->m_mutex);
		mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
			    "failed");
			status = EFAULT;
		}
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_DIAG_ACTION:
		/*
		 * The user has done a diag buffer action.  Call our
		 * routine which does this.  Only allow one diag action
		 * at one time.
		 */
		mutex_enter(&mpt->m_mutex);
		if (mpt->m_diag_action_in_progress) {
			mutex_exit(&mpt->m_mutex);
			/*
			 * NOTE(review): this return bypasses the "out:"
			 * label, unlike every other error path in this
			 * switch; confirm no common cleanup is skipped.
			 */
			return (EBUSY);
		}
		mpt->m_diag_action_in_progress = 1;
		status = mptsas_diag_action(mpt,
		    (mptsas_diag_action_t *)data, mode);
		mpt->m_diag_action_in_progress = 0;
		mutex_exit(&mpt->m_mutex);
		break;
	case MPTIOCTL_EVENT_QUERY:
		/*
		 * The user has done an event query. Call our routine
		 * which does this.
		 */
		status = mptsas_event_query(mpt,
		    (mptsas_event_query_t *)data, mode, rval);
		break;
	case MPTIOCTL_EVENT_ENABLE:
		/*
		 * The user has done an event enable. Call our routine
		 * which does this.
		 */
		status = mptsas_event_enable(mpt,
		    (mptsas_event_enable_t *)data, mode, rval);
		break;
	case MPTIOCTL_EVENT_REPORT:
		/*
		 * The user has done an event report. Call our routine
		 * which does this.
		 */
		status = mptsas_event_report(mpt,
		    (mptsas_event_report_t *)data, mode, rval);
		break;
	case MPTIOCTL_REG_ACCESS:
		/*
		 * The user has requested register access.  Call our
		 * routine which does this.
		 */
		status = mptsas_reg_access(mpt,
		    (mptsas_reg_access_t *)data, mode);
		break;
	default:
		/* Unrecognized commands fall through to the SCSA framework. */
		status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
		    rval);
		break;
	}

out:
	return (status);
}
13062 13058
/*
 * Quiesce the HBA, reset and re-initialize the IOC, then restart any
 * queued I/O.  Must be called with m_mutex held.  Returns DDI_SUCCESS
 * or DDI_FAILURE; failure is additionally reported through FMA.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int rval = DDI_SUCCESS;
	mptsas_target_t *ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Wait until all the allocated sense data buffers for DMA are freed.
	 */
	while (mpt->m_extreq_sense_refcount > 0)
		cv_wait(&mpt->m_extreq_sense_refcount_cv, &mpt->m_mutex);

	/*
	 * Set all throttles to HOLD
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/* Drain completions, then requeue/restart the aborted work. */
	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
13147 13143
/*
 * (Re)initialize the IOC: reset the chip, re-read IOC facts, re-allocate
 * the facts-sized DMA structures (skipped when a message-unit reset
 * sufficed on a re-init), program the reply queues, enable ports and
 * event notification, and finally verify every DMA/access handle via
 * FMA.  first_time distinguishes the attach path from later resets.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t cookie;
	uint32_t i;
	int rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	/*
	 * A successful message-unit reset on a re-init leaves the
	 * facts-sized allocations valid, so skip straight to IOC init.
	 */
	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
		goto fail;
	}
	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_sense_bufs(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_sense_bufs failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr & 0xffffffffu;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_ioc_enable_event_notification failed");
		goto fail;
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
13335 13331
13336 13332 static int
13337 13333 mptsas_get_pci_cap(mptsas_t *mpt)
13338 13334 {
13339 13335 ushort_t caps_ptr, cap, cap_count;
13340 13336
13341 13337 if (mpt->m_config_handle == NULL)
13342 13338 return (FALSE);
13343 13339 /*
13344 13340 * Check if capabilities list is supported and if so,
13345 13341 * get initial capabilities pointer and clear bits 0,1.
13346 13342 */
13347 13343 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
13348 13344 & PCI_STAT_CAP) {
13349 13345 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13350 13346 PCI_CONF_CAP_PTR), 4);
13351 13347 } else {
13352 13348 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
13353 13349 }
13354 13350
13355 13351 /*
13356 13352 * Walk capabilities if supported.
13357 13353 */
13358 13354 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
13359 13355
13360 13356 /*
13361 13357 * Check that we haven't exceeded the maximum number of
13362 13358 * capabilities and that the pointer is in a valid range.
13363 13359 */
13364 13360 if (++cap_count > 48) {
13365 13361 mptsas_log(mpt, CE_WARN,
13366 13362 "too many device capabilities.\n");
13367 13363 break;
13368 13364 }
13369 13365 if (caps_ptr < 64) {
13370 13366 mptsas_log(mpt, CE_WARN,
13371 13367 "capabilities pointer 0x%x out of range.\n",
13372 13368 caps_ptr);
13373 13369 break;
13374 13370 }
13375 13371
13376 13372 /*
13377 13373 * Get next capability and check that it is valid.
13378 13374 * For now, we only support power management.
13379 13375 */
13380 13376 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
13381 13377 switch (cap) {
13382 13378 case PCI_CAP_ID_PM:
13383 13379 mptsas_log(mpt, CE_NOTE,
13384 13380 "?mptsas%d supports power management.\n",
13385 13381 mpt->m_instance);
13386 13382 mpt->m_options |= MPTSAS_OPT_PM;
13387 13383
13388 13384 /* Save PMCSR offset */
13389 13385 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
13390 13386 break;
13391 13387 /*
13392 13388 * The following capabilities are valid. Any others
13393 13389 * will cause a message to be logged.
13394 13390 */
13395 13391 case PCI_CAP_ID_VPD:
13396 13392 case PCI_CAP_ID_MSI:
13397 13393 case PCI_CAP_ID_PCIX:
13398 13394 case PCI_CAP_ID_PCI_E:
13399 13395 case PCI_CAP_ID_MSI_X:
13400 13396 break;
13401 13397 default:
13402 13398 mptsas_log(mpt, CE_NOTE,
13403 13399 "?mptsas%d unrecognized capability "
13404 13400 "0x%x.\n", mpt->m_instance, cap);
13405 13401 break;
13406 13402 }
13407 13403
13408 13404 /*
13409 13405 * Get next capabilities pointer and clear bits 0,1.
13410 13406 */
13411 13407 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13412 13408 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
13413 13409 }
13414 13410 return (TRUE);
13415 13411 }
13416 13412
13417 13413 static int
13418 13414 mptsas_init_pm(mptsas_t *mpt)
13419 13415 {
13420 13416 char pmc_name[16];
13421 13417 char *pmc[] = {
13422 13418 NULL,
13423 13419 "0=Off (PCI D3 State)",
13424 13420 "3=On (PCI D0 State)",
13425 13421 NULL
13426 13422 };
13427 13423 uint16_t pmcsr_stat;
13428 13424
13429 13425 if (mptsas_get_pci_cap(mpt) == FALSE) {
13430 13426 return (DDI_FAILURE);
13431 13427 }
13432 13428 /*
13433 13429 * If PCI's capability does not support PM, then don't need
13434 13430 * to registe the pm-components
13435 13431 */
13436 13432 if (!(mpt->m_options & MPTSAS_OPT_PM))
13437 13433 return (DDI_SUCCESS);
13438 13434 /*
13439 13435 * If power management is supported by this chip, create
13440 13436 * pm-components property for the power management framework
13441 13437 */
13442 13438 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
13443 13439 pmc[0] = pmc_name;
13444 13440 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
13445 13441 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
13446 13442 mpt->m_options &= ~MPTSAS_OPT_PM;
13447 13443 mptsas_log(mpt, CE_WARN,
13448 13444 "mptsas%d: pm-component property creation failed.",
13449 13445 mpt->m_instance);
13450 13446 return (DDI_FAILURE);
13451 13447 }
13452 13448
13453 13449 /*
13454 13450 * Power on device.
13455 13451 */
13456 13452 (void) pm_busy_component(mpt->m_dip, 0);
13457 13453 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
13458 13454 mpt->m_pmcsr_offset);
13459 13455 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
13460 13456 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
13461 13457 mpt->m_instance);
13462 13458 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
13463 13459 PCI_PMCSR_D0);
13464 13460 }
13465 13461 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
13466 13462 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
13467 13463 return (DDI_FAILURE);
13468 13464 }
13469 13465 mpt->m_power_level = PM_LEVEL_D0;
13470 13466 /*
13471 13467 * Set pm idle delay.
13472 13468 */
13473 13469 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
13474 13470 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
13475 13471
13476 13472 return (DDI_SUCCESS);
13477 13473 }
13478 13474
13479 13475 static int
13480 13476 mptsas_register_intrs(mptsas_t *mpt)
13481 13477 {
13482 13478 dev_info_t *dip;
13483 13479 int intr_types;
13484 13480
13485 13481 dip = mpt->m_dip;
13486 13482
13487 13483 /* Get supported interrupt types */
13488 13484 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
13489 13485 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
13490 13486 "failed\n");
13491 13487 return (FALSE);
13492 13488 }
13493 13489
13494 13490 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
13495 13491
13496 13492 /*
13497 13493 * Try MSI, but fall back to FIXED
13498 13494 */
13499 13495 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
13500 13496 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
13501 13497 NDBG0(("Using MSI interrupt type"));
13502 13498 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
13503 13499 return (TRUE);
13504 13500 }
13505 13501 }
13506 13502 if (intr_types & DDI_INTR_TYPE_FIXED) {
13507 13503 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
13508 13504 NDBG0(("Using FIXED interrupt type"));
13509 13505 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
13510 13506 return (TRUE);
13511 13507 } else {
13512 13508 NDBG0(("FIXED interrupt registration failed"));
13513 13509 return (FALSE);
13514 13510 }
13515 13511 }
13516 13512
13517 13513 return (FALSE);
13518 13514 }
13519 13515
/*
 * Tear down the interrupt handles set up by mptsas_register_intrs().
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
13525 13521
/*
 * mptsas_add_intrs:
 *
 * Register FIXED or MSI interrupts.  Allocates the handle array
 * (mpt->m_htable), allocates and sets up the interrupt(s), records
 * count/priority/capabilities in the softstate and enables them.
 * On any failure all resources allocated so far are released and
 * DDI_FAILURE is returned.
 */
static int
mptsas_add_intrs(mptsas_t *mpt, int intr_type)
{
	dev_info_t	*dip = mpt->m_dip;
	int		avail, actual, count = 0;
	int		i, flag, ret;

	NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count <= 0)) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
		    "ret %d count %d\n", ret, count);

		return (DDI_FAILURE);
	}

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
		    "ret %d avail %d\n", ret, avail);

		return (DDI_FAILURE);
	}

	/* Not fatal; just note when fewer vectors are available. */
	if (avail < count) {
		mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
		    "navail() returned %d", count, avail);
	}

	/* Mpt only have one interrupt routine */
	if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
		count = 1;
	}

	/* Allocate an array of interrupt handles */
	mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
	mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);

	flag = DDI_INTR_ALLOC_NORMAL;

	/* call ddi_intr_alloc() */
	ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
		    ret);
		kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/* use interrupt count returned or abort? */
	if (actual < count) {
		mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
		    count, actual);
	}

	mpt->m_intr_cnt = actual;

	/*
	 * Get priority for first msi, assume remaining are all the same
	 */
	if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
	    &mpt->m_intr_pri)) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(mpt->m_htable[i]);
		}

		kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/*
	 * Test for high level mutex; this driver's handler cannot run
	 * above the hilevel priority, so refuse such interrupts.
	 */
	if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
		mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
		    "Hi level interrupt not supported\n");

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(mpt->m_htable[i]);
		}

		kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
		    (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
			    "failed %d\n", ret);

			/*
			 * NOTE(review): this error path frees all
			 * handles without first removing the handlers
			 * that were successfully added on earlier
			 * iterations, and the cleanup loop reuses the
			 * outer index 'i' — confirm this is intended.
			 */
			/* Free already allocated intr */
			for (i = 0; i < actual; i++) {
				(void) ddi_intr_free(mpt->m_htable[i]);
			}

			kmem_free(mpt->m_htable, mpt->m_intr_size);
			return (DDI_FAILURE);
		}
	}

	if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
	    != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);

		/* Free already allocated intr */
		for (i = 0; i < actual; i++) {
			(void) ddi_intr_free(mpt->m_htable[i]);
		}

		kmem_free(mpt->m_htable, mpt->m_intr_size);
		return (DDI_FAILURE);
	}

	/*
	 * Enable interrupts
	 */
	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
	} else {
		/* Call ddi_intr_enable for MSI or FIXED interrupts */
		for (i = 0; i < mpt->m_intr_cnt; i++) {
			(void) ddi_intr_enable(mpt->m_htable[i]);
		}
	}
	return (DDI_SUCCESS);
}
13667 13663
/*
 * mptsas_rem_intrs:
 *
 * Unregister FIXED or MSI interrupts: disable them (block-disable when
 * the capability allows), remove the handlers, free each handle and
 * release the handle array allocated by mptsas_add_intrs().
 */
static void
mptsas_rem_intrs(mptsas_t *mpt)
{
	int	i;

	NDBG6(("mptsas_rem_intrs"));

	/* Disable all interrupts */
	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_disable() */
		(void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
	} else {
		for (i = 0; i < mpt->m_intr_cnt; i++) {
			(void) ddi_intr_disable(mpt->m_htable[i]);
		}
	}

	/* Call ddi_intr_remove_handler() */
	for (i = 0; i < mpt->m_intr_cnt; i++) {
		(void) ddi_intr_remove_handler(mpt->m_htable[i]);
		(void) ddi_intr_free(mpt->m_htable[i]);
	}

	kmem_free(mpt->m_htable, mpt->m_intr_size);
}
13698 13694
/*
 * The IO fault service error handling callback function, registered
 * with the framework by mptsas_fm_init() via ddi_fm_handler_register().
 */
/*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
13713 13709
/*
 * mptsas_fm_init - initialize fma capabilities and register with IO
 * fault services.  A no-op when mpt->m_fm_capabilities is 0 (FMA
 * disabled).  Counterpart to mptsas_fm_fini().
 */
static void
mptsas_fm_init(mptsas_t *mpt)
{
	/*
	 * Need to change iblock to priority for new MSI intr
	 */
	ddi_iblock_cookie_t	fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (mpt->m_fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 * mpt->m_fm_capabilities will be updated to indicate
		 * capabilities actually supported (not requested.)
		 */
		ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */
		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			pci_ereport_setup(mpt->m_dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			ddi_fm_handler_register(mpt->m_dip,
			    mptsas_fm_error_cb, (void *) mpt);
		}
	}
}
13758 13754
/*
 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
 * fault services.  Teardown mirrors mptsas_fm_init() in reverse order
 * and restores the default (non-FMA) access/DMA attributes.
 */
static void
mptsas_fm_fini(mptsas_t *mpt)
{
	/* Only unregister FMA capabilities if registered */
	if (mpt->m_fm_capabilities) {

		/*
		 * Un-register error callback if error callback capable.
		 */

		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			ddi_fm_handler_unregister(mpt->m_dip);
		}

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */

		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			pci_ereport_teardown(mpt->m_dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(mpt->m_dip);

		/* Adjust access and dma attributes for FMA */
		mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;

	}
}
13797 13793
13798 13794 int
13799 13795 mptsas_check_acc_handle(ddi_acc_handle_t handle)
13800 13796 {
13801 13797 ddi_fm_error_t de;
13802 13798
13803 13799 if (handle == NULL)
13804 13800 return (DDI_FAILURE);
13805 13801 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
13806 13802 return (de.fme_status);
13807 13803 }
13808 13804
13809 13805 int
13810 13806 mptsas_check_dma_handle(ddi_dma_handle_t handle)
13811 13807 {
13812 13808 ddi_fm_error_t de;
13813 13809
13814 13810 if (handle == NULL)
13815 13811 return (DDI_FAILURE);
13816 13812 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
13817 13813 return (de.fme_status);
13818 13814 }
13819 13815
/*
 * Post an FMA ereport of class "DDI_FM_DEVICE.<detail>" for this
 * instance, if the instance is ereport-capable.  Safe from interrupt
 * context (DDI_NOSLEEP).
 */
void
mptsas_fm_ereport(mptsas_t *mpt, char *detail)
{
	uint64_t	ena;
	char		buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	/* Fresh ENA: this report is not chained to a prior error. */
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
		ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}
13833 13829
13834 13830 static int
13835 13831 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13836 13832 uint16_t *dev_handle, mptsas_target_t **pptgt)
13837 13833 {
13838 13834 int rval;
13839 13835 uint32_t dev_info;
13840 13836 uint64_t sas_wwn;
13841 13837 mptsas_phymask_t phymask;
13842 13838 uint8_t physport, phynum, config, disk;
13843 13839 uint64_t devicename;
13844 13840 uint16_t pdev_hdl;
13845 13841 mptsas_target_t *tmp_tgt = NULL;
13846 13842 uint16_t bay_num, enclosure, io_flags;
13847 13843
13848 13844 ASSERT(*pptgt == NULL);
13849 13845
13850 13846 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13851 13847 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13852 13848 &bay_num, &enclosure, &io_flags);
13853 13849 if (rval != DDI_SUCCESS) {
13854 13850 rval = DEV_INFO_FAIL_PAGE0;
13855 13851 return (rval);
13856 13852 }
13857 13853
13858 13854 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
13859 13855 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13860 13856 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
13861 13857 rval = DEV_INFO_WRONG_DEVICE_TYPE;
13862 13858 return (rval);
13863 13859 }
13864 13860
13865 13861 /*
13866 13862 * Check if the dev handle is for a Phys Disk. If so, set return value
13867 13863 * and exit. Don't add Phys Disks to hash.
13868 13864 */
13869 13865 for (config = 0; config < mpt->m_num_raid_configs; config++) {
13870 13866 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
13871 13867 if (*dev_handle == mpt->m_raidconfig[config].
13872 13868 m_physdisk_devhdl[disk]) {
13873 13869 rval = DEV_INFO_PHYS_DISK;
13874 13870 return (rval);
13875 13871 }
13876 13872 }
13877 13873 }
13878 13874
13879 13875 /*
13880 13876 * Get SATA Device Name from SAS device page0 for
13881 13877 * sata device, if device name doesn't exist, set mta_wwn to
13882 13878 * 0 for direct attached SATA. For the device behind the expander
13883 13879 * we still can use STP address assigned by expander.
13884 13880 */
13885 13881 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13886 13882 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13887 13883 /* alloc a temporary target to send the cmd to */
13888 13884 tmp_tgt = mptsas_tgt_alloc(mpt->m_tmp_targets, *dev_handle,
13889 13885 0, dev_info, 0, 0);
13890 13886 mutex_exit(&mpt->m_mutex);
13891 13887
13892 13888 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13893 13889
13894 13890 if (devicename == -1) {
13895 13891 mutex_enter(&mpt->m_mutex);
13896 13892 refhash_remove(mpt->m_tmp_targets, tmp_tgt);
13897 13893 rval = DEV_INFO_FAIL_GUID;
13898 13894 return (rval);
13899 13895 }
13900 13896
13901 13897 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13902 13898 sas_wwn = devicename;
13903 13899 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13904 13900 sas_wwn = 0;
13905 13901 }
13906 13902
13907 13903 mutex_enter(&mpt->m_mutex);
13908 13904 refhash_remove(mpt->m_tmp_targets, tmp_tgt);
13909 13905 }
13910 13906
13911 13907 phymask = mptsas_physport_to_phymask(mpt, physport);
13912 13908 *pptgt = mptsas_tgt_alloc(mpt->m_targets, *dev_handle, sas_wwn,
13913 13909 dev_info, phymask, phynum);
13914 13910 if (*pptgt == NULL) {
13915 13911 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
13916 13912 "structure!");
13917 13913 rval = DEV_INFO_FAIL_ALLOC;
13918 13914 return (rval);
13919 13915 }
13920 13916 (*pptgt)->m_io_flags = io_flags;
13921 13917 (*pptgt)->m_enclosure = enclosure;
13922 13918 (*pptgt)->m_slot_num = bay_num;
13923 13919 return (DEV_INFO_SUCCESS);
13924 13920 }
13925 13921
/*
 * Fetch the NAA-format GUID of a SATA device by issuing an INQUIRY for
 * VPD page 0x83 (device identification).  Returns:
 *   - the 64-bit NAA-5 WWN on success,
 *   - 0 when the device has no NAA GUID (or the descriptor is not
 *     LUN-associated, or retries are exhausted),
 *   - (uint64_t)-1 when the inquiry itself fails (callers compare
 *     against -1 to distinguish "failed" from "no GUID").
 */
uint64_t
mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
{
	uint64_t	sata_guid = 0, *pwwn = NULL;
	int		target = ptgt->m_devhdl;
	uchar_t		*inq83 = NULL;
	int		inq83_len = 0xFF;
	uchar_t		*dblk = NULL;
	int		inq83_retry = 3;
	int		rval = DDI_FAILURE;

	inq83	= kmem_zalloc(inq83_len, KM_SLEEP);

inq83_retry:
	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
	    inq83_len, NULL, 1);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
		    "0x83 for target:%x, lun:%x failed!", target, lun);
		sata_guid = -1;
		goto out;
	}
	/* According to SAT2, the first descriptor is logic unit name */
	dblk = &inq83[4];
	/* Association field (bits 5:4) must be 0 (addressed logical unit). */
	if ((dblk[1] & 0x30) != 0) {
		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
		goto out;
	}
	pwwn = (uint64_t *)(void *)(&dblk[4]);
	/* 0x5n in the first identifier byte marks an NAA-5 WWN. */
	if ((dblk[4] & 0xf0) == 0x50) {
		sata_guid = BE_64(*pwwn);
		goto out;
	} else if (dblk[4] == 'A') {
		NDBG20(("SATA drive has no NAA format GUID."));
		goto out;
	} else {
		/* The data is not ready, wait and retry */
		inq83_retry--;
		if (inq83_retry <= 0) {
			goto out;
		}
		NDBG20(("The GUID is not ready, retry..."));
		delay(1 * drv_usectohz(1000000));
		goto inq83_retry;
	}
out:
	kmem_free(inq83, inq83_len);
	return (sata_guid);
}
13975 13971
13976 13972 static int
13977 13973 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
13978 13974 unsigned char *buf, int len, int *reallen, uchar_t evpd)
13979 13975 {
13980 13976 uchar_t cdb[CDB_GROUP0];
13981 13977 struct scsi_address ap;
13982 13978 struct buf *data_bp = NULL;
13983 13979 int resid = 0;
13984 13980 int ret = DDI_FAILURE;
13985 13981
13986 13982 ASSERT(len <= 0xffff);
13987 13983
13988 13984 ap.a_target = MPTSAS_INVALID_DEVHDL;
13989 13985 ap.a_lun = (uchar_t)(lun);
13990 13986 ap.a_hba_tran = mpt->m_tran;
13991 13987
13992 13988 data_bp = scsi_alloc_consistent_buf(&ap,
13993 13989 (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
13994 13990 if (data_bp == NULL) {
13995 13991 return (ret);
13996 13992 }
13997 13993 bzero(cdb, CDB_GROUP0);
13998 13994 cdb[0] = SCMD_INQUIRY;
13999 13995 cdb[1] = evpd;
14000 13996 cdb[2] = page;
14001 13997 cdb[3] = (len & 0xff00) >> 8;
14002 13998 cdb[4] = (len & 0x00ff);
14003 13999 cdb[5] = 0;
14004 14000
14005 14001 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
14006 14002 &resid);
14007 14003 if (ret == DDI_SUCCESS) {
14008 14004 if (reallen) {
14009 14005 *reallen = len - resid;
14010 14006 }
14011 14007 bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
14012 14008 }
14013 14009 if (data_bp) {
14014 14010 scsi_free_consistent_buf(data_bp);
14015 14011 }
14016 14012 return (ret);
14017 14013 }
14018 14014
14019 14015 static int
14020 14016 mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
14021 14017 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
14022 14018 int *resid)
14023 14019 {
14024 14020 struct scsi_pkt *pktp = NULL;
14025 14021 scsi_hba_tran_t *tran_clone = NULL;
14026 14022 mptsas_tgt_private_t *tgt_private = NULL;
14027 14023 int ret = DDI_FAILURE;
14028 14024
14029 14025 /*
14030 14026 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
14031 14027 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
14032 14028 * to simulate the cmds from sd
14033 14029 */
14034 14030 tran_clone = kmem_alloc(
14035 14031 sizeof (scsi_hba_tran_t), KM_SLEEP);
14036 14032 if (tran_clone == NULL) {
14037 14033 goto out;
14038 14034 }
14039 14035 bcopy((caddr_t)mpt->m_tran,
14040 14036 (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
14041 14037 tgt_private = kmem_alloc(
14042 14038 sizeof (mptsas_tgt_private_t), KM_SLEEP);
14043 14039 if (tgt_private == NULL) {
14044 14040 goto out;
14045 14041 }
14046 14042 tgt_private->t_lun = ap->a_lun;
14047 14043 tgt_private->t_private = ptgt;
14048 14044 tran_clone->tran_tgt_private = tgt_private;
14049 14045 ap->a_hba_tran = tran_clone;
14050 14046
14051 14047 pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
14052 14048 data_bp, cdblen, sizeof (struct scsi_arq_status),
14053 14049 0, PKT_CONSISTENT, NULL, NULL);
14054 14050 if (pktp == NULL) {
14055 14051 goto out;
14056 14052 }
14057 14053 bcopy(cdb, pktp->pkt_cdbp, cdblen);
14058 14054 pktp->pkt_flags = FLAG_NOPARITY;
14059 14055 if (scsi_poll(pktp) < 0) {
14060 14056 goto out;
14061 14057 }
14062 14058 if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
14063 14059 goto out;
14064 14060 }
14065 14061 if (resid != NULL) {
14066 14062 *resid = pktp->pkt_resid;
14067 14063 }
14068 14064
14069 14065 ret = DDI_SUCCESS;
14070 14066 out:
14071 14067 if (pktp) {
14072 14068 scsi_destroy_pkt(pktp);
14073 14069 }
14074 14070 if (tran_clone) {
14075 14071 kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
14076 14072 }
14077 14073 if (tgt_private) {
14078 14074 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
14079 14075 }
14080 14076 return (ret);
14081 14077 }
14082 14078 static int
14083 14079 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
14084 14080 {
14085 14081 char *cp = NULL;
14086 14082 char *ptr = NULL;
14087 14083 size_t s = 0;
14088 14084 char *wwid_str = NULL;
14089 14085 char *lun_str = NULL;
14090 14086 long lunnum;
14091 14087 long phyid = -1;
14092 14088 int rc = DDI_FAILURE;
14093 14089
14094 14090 ptr = name;
14095 14091 ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
14096 14092 ptr++;
14097 14093 if ((cp = strchr(ptr, ',')) == NULL) {
14098 14094 return (DDI_FAILURE);
14099 14095 }
14100 14096
14101 14097 wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14102 14098 s = (uintptr_t)cp - (uintptr_t)ptr;
14103 14099
14104 14100 bcopy(ptr, wwid_str, s);
14105 14101 wwid_str[s] = '\0';
14106 14102
14107 14103 ptr = ++cp;
14108 14104
14109 14105 if ((cp = strchr(ptr, '\0')) == NULL) {
14110 14106 goto out;
14111 14107 }
14112 14108 lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14113 14109 s = (uintptr_t)cp - (uintptr_t)ptr;
14114 14110
14115 14111 bcopy(ptr, lun_str, s);
14116 14112 lun_str[s] = '\0';
14117 14113
14118 14114 if (name[0] == 'p') {
14119 14115 rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
14120 14116 } else {
14121 14117 rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
14122 14118 }
14123 14119 if (rc != DDI_SUCCESS)
14124 14120 goto out;
14125 14121
14126 14122 if (phyid != -1) {
14127 14123 ASSERT(phyid < MPTSAS_MAX_PHYS);
14128 14124 *phy = (uint8_t)phyid;
14129 14125 }
14130 14126 rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
14131 14127 if (rc != 0)
14132 14128 goto out;
14133 14129
14134 14130 *lun = (int)lunnum;
14135 14131 rc = DDI_SUCCESS;
14136 14132 out:
14137 14133 if (wwid_str)
14138 14134 kmem_free(wwid_str, SCSI_MAXNAMELEN);
14139 14135 if (lun_str)
14140 14136 kmem_free(lun_str, SCSI_MAXNAMELEN);
14141 14137
14142 14138 return (rc);
14143 14139 }
14144 14140
14145 14141 /*
14146 14142 * mptsas_parse_smp_name() is to parse sas wwn string
14147 14143 * which format is "wWWN"
14148 14144 */
14149 14145 static int
14150 14146 mptsas_parse_smp_name(char *name, uint64_t *wwn)
14151 14147 {
14152 14148 char *ptr = name;
14153 14149
14154 14150 if (*ptr != 'w') {
14155 14151 return (DDI_FAILURE);
14156 14152 }
14157 14153
14158 14154 ptr++;
14159 14155 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
14160 14156 return (DDI_FAILURE);
14161 14157 }
14162 14158 return (DDI_SUCCESS);
14163 14159 }
14164 14160
/*
 * mptsas_bus_config:
 *
 * Nexus bus_config(9E) entry point for the iport nodes.  Handles
 * BUS_CONFIG_ONE (configure a single child named "smp@w...",
 * "...@wWWN,LUN" or "...@pPHY,LUN") as well as BUS_CONFIG_DRIVER /
 * BUS_CONFIG_ALL (enumerate everything).  The scsi_vhci and iport
 * nexus locks are held across the operation.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int		ret = NDI_FAILURE;
	int		circ = 0;
	int		circ1 = 0;
	mptsas_t	*mpt;
	char		*ptr = NULL;
	char		*devnm = NULL;
	uint64_t	wwid = 0;
	uint8_t		phy = 0xFF;
	int		lun = 0;
	uint_t		mflags = flag;
	int		bconfig = TRUE;

	/* Only iport nodes (with a unit address) are configurable here. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	default:
		ret = NDI_FAILURE;
		break;
	}

	/* Let the framework attach/bind the node(s) unless skipped above. */
	if ((ret == NDI_SUCCESS) && bconfig) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
14285 14281
14286 14282 static int
14287 14283 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
14288 14284 mptsas_target_t *ptgt)
14289 14285 {
14290 14286 int rval = DDI_FAILURE;
14291 14287 struct scsi_inquiry *sd_inq = NULL;
14292 14288 mptsas_t *mpt = DIP2MPT(pdip);
14293 14289
14294 14290 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14295 14291
14296 14292 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
14297 14293 SUN_INQSIZE, 0, (uchar_t)0);
14298 14294
14299 14295 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14300 14296 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
14301 14297 } else {
14302 14298 rval = DDI_FAILURE;
14303 14299 }
14304 14300
14305 14301 kmem_free(sd_inq, SUN_INQSIZE);
14306 14302 return (rval);
14307 14303 }
14308 14304
/*
 * mptsas_config_one_addr:
 *
 * Configure a single LUN identified by SAS address.  Looks the target
 * up via the iport's phymask; if a child node already exists it is
 * returned directly, otherwise the LUN is probed (or, for phymask 0,
 * configured as an IR volume).  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int		rval;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		if (sasaddr != ptgt->m_addr.mta_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
14369 14365
/*
 * mptsas_config_one_phy:
 *
 * Configure a single LUN identified by PHY number ("p" form unit
 * address).  Mirrors mptsas_config_one_addr() but resolves the target
 * via mptsas_phy_to_tgt().  Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
    dev_info_t **lundip)
{
	int		rval;
	mptsas_t	*mpt = DIP2MPT(pdip);
	mptsas_phymask_t phymask;
	mptsas_target_t	*ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}

	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun().
	 */

	*lundip = mptsas_find_child_phy(pdip, phy);
	if (*lundip != NULL) {
		return (DDI_SUCCESS);
	}

	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
14409 14405
14410 14406 static int
14411 14407 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
14412 14408 uint8_t *lun_addr_type)
14413 14409 {
14414 14410 uint32_t lun_idx = 0;
14415 14411
14416 14412 ASSERT(lun_num != NULL);
14417 14413 ASSERT(lun_addr_type != NULL);
14418 14414
14419 14415 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14420 14416 /* determine report luns addressing type */
14421 14417 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
14422 14418 /*
14423 14419 * Vendors in the field have been found to be concatenating
14424 14420 * bus/target/lun to equal the complete lun value instead
14425 14421 * of switching to flat space addressing
14426 14422 */
14427 14423 /* 00b - peripheral device addressing method */
14428 14424 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
14429 14425 /* FALLTHRU */
14430 14426 /* 10b - logical unit addressing method */
14431 14427 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
14432 14428 /* FALLTHRU */
14433 14429 /* 01b - flat space addressing method */
14434 14430 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
14435 14431 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
14436 14432 *lun_addr_type = (buf[lun_idx] &
14437 14433 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
14438 14434 *lun_num = (buf[lun_idx] & 0x3F) << 8;
14439 14435 *lun_num |= buf[lun_idx + 1];
14440 14436 return (DDI_SUCCESS);
14441 14437 default:
14442 14438 return (DDI_FAILURE);
14443 14439 }
14444 14440 }
14445 14441
/*
 * Enumerate the LUNs behind a single target.
 *
 * Issues SCMD_REPORT_LUNS (growing the response buffer and retrying up
 * to 3 times as needed), probes/configures a child node for every LUN
 * reported, and offlines any previously-known LUN that is absent from
 * the new report.  SATA devices are handled as single-LUN specials up
 * front since they cannot be expected to honor REPORT LUNS.
 *
 * Returns DDI_SUCCESS when the LUN list was retrieved and processed;
 * DDI_FAILURE otherwise (e.g. the target does not support REPORT LUNS;
 * the caller then falls back to probing LUN 0 directly).
 */
static int
mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
{
	struct buf *repluns_bp = NULL;
	struct scsi_address ap;
	uchar_t cdb[CDB_GROUP5];
	int ret = DDI_FAILURE;
	int retry = 0;
	int lun_list_len = 0;
	uint16_t lun_num = 0;
	uint8_t lun_addr_type = 0;
	uint32_t lun_cnt = 0;
	uint32_t lun_total = 0;
	dev_info_t *cdip = NULL;
	uint16_t *saved_repluns = NULL;
	char *buffer = NULL;
	int buf_len = 128;
	mptsas_t *mpt = DIP2MPT(pdip);
	uint64_t sas_wwn = 0;
	uint8_t phy = 0xFF;
	uint32_t dev_info = 0;

	/* Snapshot target identity under the softstate lock. */
	mutex_enter(&mpt->m_mutex);
	sas_wwn = ptgt->m_addr.mta_wwn;
	phy = ptgt->m_phynum;
	dev_info = ptgt->m_deviceinfo;
	mutex_exit(&mpt->m_mutex);

	if (sas_wwn == 0) {
		/*
		 * It's a SATA without Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_phy(pdip, phy)) {
			return (DDI_SUCCESS);
		} else {
			/*
			 * need configure and create node
			 */
			return (DDI_FAILURE);
		}
	}

	/*
	 * WWN (SAS address or Device Name exist)
	 */
	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		/*
		 * SATA device with Device Name
		 * So don't try multi-LUNs
		 */
		if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
			return (DDI_SUCCESS);
		} else {
			return (DDI_FAILURE);
		}
	}

	/*
	 * Issue REPORT LUNS.  If the response does not fit in buf_len,
	 * grow the buffer to the size the target advertised and retry;
	 * transient failures are retried up to 3 times.
	 */
	do {
		ap.a_target = MPTSAS_INVALID_DEVHDL;
		ap.a_lun = 0;
		ap.a_hba_tran = mpt->m_tran;
		repluns_bp = scsi_alloc_consistent_buf(&ap,
		    (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
		if (repluns_bp == NULL) {
			retry++;
			continue;
		}
		bzero(cdb, CDB_GROUP5);
		cdb[0] = SCMD_REPORT_LUNS;
		/* Allocation length, big-endian, in CDB bytes 6-9. */
		cdb[6] = (buf_len & 0xff000000) >> 24;
		cdb[7] = (buf_len & 0x00ff0000) >> 16;
		cdb[8] = (buf_len & 0x0000ff00) >> 8;
		cdb[9] = (buf_len & 0x000000ff);

		ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
		    repluns_bp, NULL);
		if (ret != DDI_SUCCESS) {
			scsi_free_consistent_buf(repluns_bp);
			retry++;
			continue;
		}
		/* First 4 bytes of the response hold the LUN list length. */
		lun_list_len = BE_32(*(int *)((void *)(
		    repluns_bp->b_un.b_addr)));
		if (buf_len >= lun_list_len + 8) {
			ret = DDI_SUCCESS;
			break;
		}
		scsi_free_consistent_buf(repluns_bp);
		buf_len = lun_list_len + 8;

	} while (retry < 3);

	if (ret != DDI_SUCCESS)
		return (ret);
	buffer = (char *)repluns_bp->b_un.b_addr;
	/*
	 * find out the number of luns returned by the SCSI ReportLun call
	 * and allocate buffer space
	 */
	lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
	saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
	if (saved_repluns == NULL) {
		/*
		 * NOTE(review): kmem_zalloc() with KM_SLEEP sleeps until
		 * the allocation succeeds and never returns NULL, so this
		 * branch is dead code.
		 */
		scsi_free_consistent_buf(repluns_bp);
		return (DDI_FAILURE);
	}
	for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
		if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
		    &lun_num, &lun_addr_type) != DDI_SUCCESS) {
			continue;
		}
		saved_repluns[lun_cnt] = lun_num;
		/* Configure the LUN if we don't already have a node for it. */
		if ((cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num)) !=
		    NULL) {
			ret = DDI_SUCCESS;
		} else {
			ret = mptsas_probe_lun(pdip, lun_num, &cdip,
			    ptgt);
		}
		if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
			/* The LUN is back; clear any "device gone" marker. */
			(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
			    MPTSAS_DEV_GONE);
		}
	}
	/* Offline any existing child whose LUN is absent from the report. */
	mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
	kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
	scsi_free_consistent_buf(repluns_bp);
	return (DDI_SUCCESS);
}
14576 14572
/*
 * Configure a single RAID volume identified by its volume device
 * handle: look up the corresponding target, INQUIRY it, and create a
 * physical LUN node for it under the (virtual) iport.
 *
 * Returns DDI_SUCCESS if the node was created; DDI_FAILURE otherwise.
 */
static int
mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
{
	int rval = DDI_FAILURE;
	struct scsi_inquiry *sd_inq = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);
	mptsas_target_t *ptgt = NULL;

	/* RAID volumes are tracked as targets, keyed by devhdl. */
	mutex_enter(&mpt->m_mutex);
	ptgt = refhash_linear_search(mpt->m_targets,
	    mptsas_target_eval_devhdl, &target);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
		    "not found.", target);
		return (rval);
	}

	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
	rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
	    SUN_INQSIZE, 0, (uchar_t)0);

	/* Only create a node for a LUN the volume actually reports. */
	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
		    0);
	} else {
		rval = DDI_FAILURE;
	}

	kmem_free(sd_inq, SUN_INQSIZE);
	return (rval);
}
14609 14605
/*
 * configure all RAID volumes for virtual iport
 */
static void
mptsas_config_all_viport(dev_info_t *pdip)
{
	mptsas_t *mpt = DIP2MPT(pdip);
	int config, vol;
	int target;
	dev_info_t *lundip = NULL;

	/*
	 * Get latest RAID info and search for any Volume DevHandles. If any
	 * are found, configure the volume.
	 */
	mutex_enter(&mpt->m_mutex);
	for (config = 0; config < mpt->m_num_raid_configs; config++) {
		for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
			if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
			    == 1) {
				target = mpt->m_raidconfig[config].
				    m_raidvol[vol].m_raidhandle;
				/*
				 * Drop m_mutex across the config call; it
				 * performs blocking I/O and node creation.
				 */
				mutex_exit(&mpt->m_mutex);
				(void) mptsas_config_raid(pdip, target,
				    &lundip);
				mutex_enter(&mpt->m_mutex);
			}
		}
	}
	mutex_exit(&mpt->m_mutex);
}
14641 14637
/*
 * Offline every LUN of target ptgt that is no longer present.
 *
 * Walks both the plain child devinfo nodes and the mpxio pathinfo
 * nodes of the iport; any node whose WWN matches the target but whose
 * LUN is not in repluns (the lun_cnt LUNs just returned by
 * REPORT LUNS) is offlined via mptsas_offline_lun().
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t *child = NULL, *savechild = NULL;
	mdi_pathinfo_t *pip = NULL, *savepip = NULL;
	uint64_t sas_wwn, wwid;
	uint8_t phy;
	int lun;
	int i;
	int find;
	char *addr;
	char *nodename;
	mptsas_t *mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_addr.mta_wwn;
	mutex_exit(&mpt->m_mutex);

	/* Pass 1: non-mpxio child devinfo nodes. */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		/* Advance first; savechild may be offlined below. */
		savechild = child;
		child = ddi_get_next_sibling(child);

		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			/* Expander nodes are not LUNs; skip them. */
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(savechild, NULL);
		}
	}

	/* Pass 2: mpxio pathinfo nodes under this iport. */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(NULL, savepip);
		}
	}
}
14738 14732
/*
 * If this enclosure doesn't exist in the enclosure list, add it. If it does,
 * update it.  mep is copied, never retained, so the caller may pass a
 * stack-allocated enclosure.  Caller must hold m_mutex.
 */
static void
mptsas_enclosure_update(mptsas_t *mpt, mptsas_enclosure_t *mep)
{
	mptsas_enclosure_t *m;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));
	m = mptsas_enc_lookup(mpt, mep->me_enchdl);
	if (m != NULL) {
		uint8_t *ledp;
		m->me_flags = mep->me_flags;


		/*
		 * If the number of slots and the first slot entry in the
		 * enclosure has not changed, then we don't need to do anything
		 * here. Otherwise, we need to allocate a new array for the LED
		 * status of the slot.
		 */
		if (m->me_fslot == mep->me_fslot &&
		    m->me_nslots == mep->me_nslots)
			return;

		/*
		 * If the number of slots or the first slot has changed, it's
		 * not clear that we're really in a place that we can continue
		 * to honor the existing flags.
		 */
		if (mep->me_nslots > 0) {
			ledp = kmem_zalloc(sizeof (uint8_t) * mep->me_nslots,
			    KM_SLEEP);
		} else {
			ledp = NULL;
		}

		/* Replace the old LED-state array with the fresh one. */
		if (m->me_slotleds != NULL) {
			kmem_free(m->me_slotleds, sizeof (uint8_t) *
			    m->me_nslots);
		}
		m->me_slotleds = ledp;
		m->me_fslot = mep->me_fslot;
		m->me_nslots = mep->me_nslots;
		return;
	}

	/* Not found: allocate and insert a new enclosure entry. */
	m = kmem_zalloc(sizeof (*m), KM_SLEEP);
	m->me_enchdl = mep->me_enchdl;
	m->me_flags = mep->me_flags;
	m->me_nslots = mep->me_nslots;
	m->me_fslot = mep->me_fslot;
	if (m->me_nslots > 0) {
		m->me_slotleds = kmem_zalloc(sizeof (uint8_t) * mep->me_nslots,
		    KM_SLEEP);
		/*
		 * It may make sense to optionally flush all of the slots and/or
		 * read the slot status flag here to synchronize between
		 * ourselves and the card. So far, that hasn't been needed
		 * annecdotally when enumerating something new. If we do, we
		 * should kick that off in a taskq potentially.
		 */
	}
	list_insert_tail(&mpt->m_enclosures, m);
}
14805 14799
/*
 * Rebuild the driver's view of the SAS topology: refresh RAID info,
 * then walk the IOC's GetNextHandle config pages for SMP expanders,
 * enclosures and end devices, (re)creating the corresponding driver
 * state for each.  Each walk stops early if a page read fails, so a
 * later call resumes from the saved handle.
 */
static void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t page_address;
	int rval = 0;
	uint16_t dev_handle;
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	/* Walk SMP expanders until the IOC reports no next handle. */
	dev_handle = mpt->m_smp_devhdl;
	while (mpt->m_done_traverse_smp == 0) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(mpt, &smp_node);
	}

	/*
	 * Loop over enclosures so we can understand what's there.
	 */
	dev_handle = MPTSAS_INVALID_DEVHDL;
	while (mpt->m_done_traverse_enc == 0) {
		mptsas_enclosure_t me;

		page_address = (MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_ENCLOS_PGAD_FORM_MASK) | (uint32_t)dev_handle;

		if (mptsas_get_enclosure_page0(mpt, page_address, &me) !=
		    DDI_SUCCESS) {
			break;
		}
		dev_handle = me.me_enchdl;
		mptsas_enclosure_update(mpt, &me);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	while (mpt->m_done_traverse_dev == 0) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC) ||
		    (rval == DEV_INFO_FAIL_GUID)) {
			break;
		}

		mpt->m_dev_handle = dev_handle;
	}

}
14878 14872
/*
 * Re-sync driver data structures with the IOC after a hard reset:
 * refresh the phymask mapping, invalidate the devhdl of every known
 * target and SMP node (leaving their addresses intact), then re-walk
 * the topology so surviving devices are reassociated with their
 * potentially new devhdls.  Caller must hold m_mutex.
 */
void
mptsas_update_driver_data(struct mptsas *mpt)
{
	mptsas_target_t *tp;
	mptsas_smp_t *sp;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	/*
	 * TODO after hard reset, update the driver data structures
	 * 1. update port/phymask mapping table mpt->m_phy_info
	 * 2. invalid all the entries in hash table
	 *    m_devhdl = 0xffff and m_deviceinfo = 0
	 * 3. call sas_device_page/expander_page to update hash table
	 */
	mptsas_update_phymask(mpt);

	/*
	 * Remove all the devhdls for existing entries but leave their
	 * addresses alone. In update_hashtab() below, we'll find all
	 * targets that are still present and reassociate them with
	 * their potentially new devhdls. Leaving the targets around in
	 * this fashion allows them to be used on the tx waitq even
	 * while IOC reset is occurring.
	 */
	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		tp->m_deviceinfo = 0;
		tp->m_dr_flag = MPTSAS_DR_INACTIVE;
	}
	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
	    sp = refhash_next(mpt->m_smp_targets, sp)) {
		sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		sp->m_deviceinfo = 0;
	}
	/* Force a full re-walk of devices, expanders and enclosures. */
	mpt->m_done_traverse_dev = 0;
	mpt->m_done_traverse_smp = 0;
	mpt->m_done_traverse_enc = 0;
	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
	mptsas_update_hashtab(mpt);
}
14921 14915
/*
 * Configure every device reachable through an iport.  The virtual
 * iport (phymask property == 0) only carries RAID volumes; for a
 * physical iport, refresh the topology caches if they are stale, then
 * online every SMP expander and configure every target whose phymask
 * matches this iport.
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t *smpdip = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);
	int phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t *psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp ||
	    !mpt->m_done_traverse_enc) {
		mptsas_update_hashtab(mpt);
	}

	/* Online matching expanders; drop m_mutex across node operations. */
	for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
	    psmp = refhash_next(mpt->m_smp_targets, psmp)) {
		phy_mask = psmp->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
	}

	/* Configure matching targets; drop m_mutex across node operations. */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		phy_mask = ptgt->m_addr.mta_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}
	}
	mutex_exit(&mpt->m_mutex);
}
14975 14969
14976 14970 static int
14977 14971 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
14978 14972 {
14979 14973 int rval = DDI_FAILURE;
14980 14974 dev_info_t *tdip;
14981 14975
14982 14976 rval = mptsas_config_luns(pdip, ptgt);
14983 14977 if (rval != DDI_SUCCESS) {
14984 14978 /*
14985 14979 * The return value means the SCMD_REPORT_LUNS
14986 14980 * did not execute successfully. The target maybe
14987 14981 * doesn't support such command.
14988 14982 */
14989 14983 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
14990 14984 }
14991 14985 return (rval);
14992 14986 }
14993 14987
/*
 * Return fail if not all the childs/paths are freed.
 * if there is any path under the HBA, the return value will be always fail
 * because we didn't call mdi_pi_free for path
 */
static int
mptsas_offline_target(dev_info_t *pdip, char *name)
{
	dev_info_t *child = NULL, *prechild = NULL;
	mdi_pathinfo_t *pip = NULL, *savepip = NULL;
	int tmp_rval, rval = DDI_SUCCESS;
	char *addr, *cp;
	size_t s;
	mptsas_t *mpt = DIP2MPT(pdip);

	/* Pass 1: offline every non-mpxio child whose address matches. */
	child = ddi_get_child(pdip);
	while (child) {
		addr = ddi_get_name_addr(child);
		/* Advance first; prechild may be offlined below. */
		prechild = child;
		child = ddi_get_next_sibling(child);

		if (addr == NULL) {
			continue;
		}
		if ((cp = strchr(addr, ',')) == NULL) {
			continue;
		}

		/* Compare only the target portion, up to the ",lun" part. */
		s = (uintptr_t)cp - (uintptr_t)addr;

		if (strncmp(addr, name, s) != 0) {
			continue;
		}

		tmp_rval = mptsas_offline_lun(prechild, NULL);
		if (tmp_rval != DDI_SUCCESS) {
			/*
			 * Could not offline the node; mark it so later
			 * enumeration knows the device is gone.
			 */
			rval = DDI_FAILURE;
			if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
			    prechild, MPTSAS_DEV_GONE) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas driver "
				    "unable to create property for "
				    "SAS %s (MPTSAS_DEV_GONE)", addr);
			}
		}
	}

	/* Pass 2: offline every matching mpxio pathinfo node. */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		addr = MDI_PI(pip)->pi_addr;
		savepip = pip;
		pip = mdi_get_next_client_path(pdip, pip);
		if (addr == NULL) {
			continue;
		}

		if ((cp = strchr(addr, ',')) == NULL) {
			continue;
		}

		s = (uintptr_t)cp - (uintptr_t)addr;

		if (strncmp(addr, name, s) != 0) {
			continue;
		}

		(void) mptsas_offline_lun(NULL, savepip);
		/*
		 * driver will not invoke mdi_pi_free, so path will not
		 * be freed forever, return DDI_FAILURE.
		 */
		rval = DDI_FAILURE;
	}
	return (rval);
}
15071 15063
15072 15064 static int
15073 -mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
15074 - mdi_pathinfo_t *rpip, uint_t flags)
15065 +mptsas_offline_lun(dev_info_t *rdip, mdi_pathinfo_t *rpip)
15075 15066 {
15076 15067 int rval = DDI_FAILURE;
15077 - char *devname;
15078 - dev_info_t *cdip, *parent;
15079 15068
15080 15069 if (rpip != NULL) {
15081 - parent = scsi_vhci_dip;
15082 - cdip = mdi_pi_get_client(rpip);
15083 - } else if (rdip != NULL) {
15084 - parent = pdip;
15085 - cdip = rdip;
15086 - } else {
15087 - return (DDI_FAILURE);
15088 - }
15089 -
15090 - /*
15091 - * Make sure node is attached otherwise
15092 - * it won't have related cache nodes to
15093 - * clean up. i_ddi_devi_attached is
15094 - * similiar to i_ddi_node_state(cdip) >=
15095 - * DS_ATTACHED.
15096 - */
15097 - if (i_ddi_devi_attached(cdip)) {
15098 -
15099 - /* Get full devname */
15100 - devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
15101 - (void) ddi_deviname(cdip, devname);
15102 - /* Clean cache */
15103 - (void) devfs_clean(parent, devname + 1,
15104 - DV_CLEAN_FORCE);
15105 - kmem_free(devname, MAXNAMELEN + 1);
15106 - }
15107 - if (rpip != NULL) {
15108 15070 if (MDI_PI_IS_OFFLINE(rpip)) {
15109 15071 rval = DDI_SUCCESS;
15110 15072 } else {
15111 15073 rval = mdi_pi_offline(rpip, 0);
15112 15074 }
15113 - } else {
15114 - rval = ndi_devi_offline(cdip, flags);
15075 + } else if (rdip != NULL) {
15076 + rval = ndi_devi_offline(rdip,
15077 + NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE);
15115 15078 }
15116 15079
15117 15080 return (rval);
15118 15081 }
15119 15082
/*
 * Find the child devinfo node of parent whose SMP_WWN property matches
 * str_wwn.  Returns the matching node, or NULL if none exists.
 */
static dev_info_t *
mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
{
	dev_info_t *child = NULL;
	char *smp_wwn = NULL;

	child = ddi_get_child(parent);
	while (child) {
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
		    != DDI_SUCCESS) {
			/* No SMP_WWN property: not an SMP node; skip it. */
			child = ddi_get_next_sibling(child);
			continue;
		}

		if (strcmp(smp_wwn, str_wwn) == 0) {
			/* Found; free the looked-up string before break. */
			ddi_prop_free(smp_wwn);
			break;
		}
		child = ddi_get_next_sibling(child);
		ddi_prop_free(smp_wwn);
	}
	return (child);
}
15144 15107
15145 15108 static int
15146 -mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
15109 +mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node)
15147 15110 {
15148 15111 int rval = DDI_FAILURE;
15149 - char *devname;
15150 15112 char wwn_str[MPTSAS_WWN_STRLEN];
15151 15113 dev_info_t *cdip;
15152 15114
15153 15115 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
15154 15116
15155 15117 cdip = mptsas_find_smp_child(pdip, wwn_str);
15156 -
15157 15118 if (cdip == NULL)
15158 15119 return (DDI_SUCCESS);
15159 15120
15160 - /*
15161 - * Make sure node is attached otherwise
15162 - * it won't have related cache nodes to
15163 - * clean up. i_ddi_devi_attached is
15164 - * similiar to i_ddi_node_state(cdip) >=
15165 - * DS_ATTACHED.
15166 - */
15167 - if (i_ddi_devi_attached(cdip)) {
15121 + rval = ndi_devi_offline(cdip, NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE);
15168 15122
15169 - /* Get full devname */
15170 - devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
15171 - (void) ddi_deviname(cdip, devname);
15172 - /* Clean cache */
15173 - (void) devfs_clean(pdip, devname + 1,
15174 - DV_CLEAN_FORCE);
15175 - kmem_free(devname, MAXNAMELEN + 1);
15176 - }
15177 -
15178 - rval = ndi_devi_offline(cdip, flags);
15179 -
15180 15123 return (rval);
15181 15124 }
15182 15125
15183 15126 static dev_info_t *
15184 15127 mptsas_find_child(dev_info_t *pdip, char *name)
15185 15128 {
15186 15129 dev_info_t *child = NULL;
15187 15130 char *rname = NULL;
15188 15131 int rval = DDI_FAILURE;
15189 15132
15190 15133 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15191 15134
15192 15135 child = ddi_get_child(pdip);
15193 15136 while (child) {
15194 15137 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
15195 15138 if (rval != DDI_SUCCESS) {
15196 15139 child = ddi_get_next_sibling(child);
15197 15140 bzero(rname, SCSI_MAXNAMELEN);
15198 15141 continue;
15199 15142 }
15200 15143
15201 15144 if (strcmp(rname, name) == 0) {
15202 15145 break;
15203 15146 }
15204 15147 child = ddi_get_next_sibling(child);
15205 15148 bzero(rname, SCSI_MAXNAMELEN);
15206 15149 }
15207 15150
15208 15151 kmem_free(rname, SCSI_MAXNAMELEN);
15209 15152
15210 15153 return (child);
15211 15154 }
15212 15155
15213 15156
15214 15157 static dev_info_t *
15215 15158 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
15216 15159 {
15217 15160 dev_info_t *child = NULL;
15218 15161 char *name = NULL;
15219 15162 char *addr = NULL;
15220 15163
15221 15164 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15222 15165 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15223 15166 (void) sprintf(name, "%016"PRIx64, sasaddr);
15224 15167 (void) sprintf(addr, "w%s,%x", name, lun);
15225 15168 child = mptsas_find_child(pdip, addr);
15226 15169 kmem_free(name, SCSI_MAXNAMELEN);
15227 15170 kmem_free(addr, SCSI_MAXNAMELEN);
15228 15171 return (child);
15229 15172 }
15230 15173
15231 15174 static dev_info_t *
15232 15175 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
15233 15176 {
15234 15177 dev_info_t *child;
15235 15178 char *addr;
15236 15179
15237 15180 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15238 15181 (void) sprintf(addr, "p%x,0", phy);
15239 15182 child = mptsas_find_child(pdip, addr);
15240 15183 kmem_free(addr, SCSI_MAXNAMELEN);
15241 15184 return (child);
15242 15185 }
15243 15186
15244 15187 static mdi_pathinfo_t *
15245 15188 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
15246 15189 {
15247 15190 mdi_pathinfo_t *path;
15248 15191 char *addr = NULL;
15249 15192
15250 15193 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15251 15194 (void) sprintf(addr, "p%x,0", phy);
15252 15195 path = mdi_pi_find(pdip, NULL, addr);
15253 15196 kmem_free(addr, SCSI_MAXNAMELEN);
15254 15197 return (path);
15255 15198 }
15256 15199
15257 15200 static mdi_pathinfo_t *
15258 15201 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
15259 15202 {
15260 15203 mdi_pathinfo_t *path;
15261 15204 char *name = NULL;
15262 15205 char *addr = NULL;
15263 15206
15264 15207 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15265 15208 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15266 15209 (void) sprintf(name, "%016"PRIx64, sasaddr);
15267 15210 (void) sprintf(addr, "w%s,%x", name, lun);
15268 15211 path = mdi_pi_find(parent, NULL, addr);
15269 15212 kmem_free(name, SCSI_MAXNAMELEN);
15270 15213 kmem_free(addr, SCSI_MAXNAMELEN);
15271 15214
15272 15215 return (path);
15273 15216 }
15274 15217
/*
 * Create a devinfo (or mpxio pathinfo) node for one LUN of a target.
 *
 * Attempts to read INQUIRY VPD page 0x83 to build a devid/GUID.  With
 * a valid GUID and MPXIO enabled, the LUN is enumerated under
 * scsi_vhci via mptsas_create_virt_lun(); otherwise (or if that
 * fails) a physical child node is created under the iport.
 *
 * Returns DDI_SUCCESS if a node was created; DDI_FAILURE otherwise.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int i = 0;
	uchar_t *inq83 = NULL;
	int inq83_len1 = 0xFF;
	int inq83_len = 0;
	int rval = DDI_FAILURE;
	ddi_devid_t devid;
	char *guid = NULL;
	int target = ptgt->m_devhdl;
	mdi_pathinfo_t *pip = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/* Tunable: still bind a physical node on failure. */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	rval = DDI_FAILURE;

create_lun:
	/* Prefer an mpxio path when a GUID exists and MPXIO is enabled. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
15398 15341
/*
 * Create (or revive) an MPxIO pathinfo node for the given target/LUN
 * under the iport pdip.  On success, *pip refers to the pathinfo node
 * and *lun_dip to its scsi_vhci client devinfo node.
 *
 * If a path for this address already exists, it is either brought back
 * online (same GUID) or torn down and rebuilt (GUID changed, e.g. a
 * different volume was mapped to the same LUN).
 *
 * Returns DDI_SUCCESS if the path ends up online, DDI_FAILURE otherwise.
 */
static int
mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
    dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
{
	int			target;
	char			*nodename = NULL;
	char			**compatible = NULL;
	int			ncompatible = 0;
	int			mdi_rtn = MDI_FAILURE;
	int			rval = DDI_FAILURE;
	char			*old_guid = NULL;
	mptsas_t		*mpt = DIP2MPT(pdip);
	char			*lun_addr = NULL;
	char			*wwn_str = NULL;
	char			*attached_wwn_str = NULL;
	char			*component = NULL;
	uint8_t			phy = 0xFF;
	uint64_t		sas_wwn;
	int64_t			lun64 = 0;
	uint32_t		devinfo;
	uint16_t		dev_hdl;
	uint16_t		pdev_hdl;
	uint64_t		dev_sas_wwn;
	uint64_t		pdev_sas_wwn;
	uint32_t		pdev_info;
	uint8_t			physport;
	uint8_t			phy_id;
	uint32_t		page_address;
	uint16_t		bay_num, enclosure, io_flags;
	char			pdev_wwn_str[MPTSAS_WWN_STRLEN];
	uint32_t		dev_info;

	/* Snapshot the target identity under the softstate lock. */
	mutex_enter(&mpt->m_mutex);
	target = ptgt->m_devhdl;
	sas_wwn = ptgt->m_addr.mta_wwn;
	devinfo = ptgt->m_deviceinfo;
	phy = ptgt->m_phynum;
	mutex_exit(&mpt->m_mutex);

	/*
	 * Look for an existing path for this address: by WWN for SAS
	 * devices, by PHY number for direct-attached devices without one.
	 */
	if (sas_wwn) {
		*pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
	} else {
		*pip = mptsas_find_path_phy(pdip, phy);
	}

	if (*pip != NULL) {
		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
		ASSERT(*lun_dip != NULL);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
		    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
		    MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
			if (strncmp(guid, old_guid, strlen(guid)) == 0) {
				/*
				 * Same path back online again.
				 */
				(void) ddi_prop_free(old_guid);
				if ((!MDI_PI_IS_ONLINE(*pip)) &&
				    (!MDI_PI_IS_STANDBY(*pip)) &&
				    (ptgt->m_tgt_unconfigured == 0)) {
					rval = mdi_pi_online(*pip, 0);
				} else {
					rval = DDI_SUCCESS;
				}
				if (rval != DDI_SUCCESS) {
					mptsas_log(mpt, CE_WARN, "path:target: "
					    "%x, lun:%x online failed!", target,
					    lun);
					*pip = NULL;
					*lun_dip = NULL;
				}
				return (rval);
			} else {
				/*
				 * The GUID of the LUN has changed which maybe
				 * because customer mapped another volume to the
				 * same LUN.
				 */
				mptsas_log(mpt, CE_WARN, "The GUID of the "
				    "target:%x, lun:%x was changed, maybe "
				    "because someone mapped another volume "
				    "to the same LUN", target, lun);
				(void) ddi_prop_free(old_guid);
				/* Offline and free the stale path first. */
				if (!MDI_PI_IS_OFFLINE(*pip)) {
					rval = mdi_pi_offline(*pip, 0);
					if (rval != MDI_SUCCESS) {
						mptsas_log(mpt, CE_WARN, "path:"
						    "target:%x, lun:%x offline "
						    "failed!", target, lun);
						*pip = NULL;
						*lun_dip = NULL;
						return (DDI_FAILURE);
					}
				}
				if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
					mptsas_log(mpt, CE_WARN, "path:target:"
					    "%x, lun:%x free failed!", target,
					    lun);
					*pip = NULL;
					*lun_dip = NULL;
					return (DDI_FAILURE);
				}
			}
		} else {
			mptsas_log(mpt, CE_WARN, "Can't get client-guid "
			    "property for path:target:%x, lun:%x", target, lun);
			*pip = NULL;
			*lun_dip = NULL;
			return (DDI_FAILURE);
		}
	}
	/* Derive the node name and compatible list from the INQUIRY data. */
	scsi_hba_nodename_compatible_get(inq, NULL,
	    inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);

	/*
	 * if nodename can't be determined then print a message and skip it
	 */
	if (nodename == NULL) {
		mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
		    "driver for target%d lun %d dtype:0x%02x", target, lun,
		    inq->inq_dtype);
		return (DDI_FAILURE);
	}

	wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
	/* The property is needed by MPAPI */
	(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);

	/*
	 * Build the unit address: "w<wwn>,<lun>" when a GUID (and hence a
	 * WWN) is available, "p<phy>,<lun>" for direct-attached devices.
	 */
	lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
	if (guid) {
		(void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
	} else {
		(void) sprintf(lun_addr, "p%x,%x", phy, lun);
		(void) sprintf(wwn_str, "p%x", phy);
	}

	mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
	    guid, lun_addr, compatible, ncompatible,
	    0, pip);
	if (mdi_rtn == MDI_SUCCESS) {

		if (mdi_prop_update_string(*pip, MDI_GUID,
		    guid) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
			    "create prop for target %d lun %d (MDI_GUID)",
			    target, lun);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		}

		if (mdi_prop_update_int(*pip, LUN_PROP,
		    lun) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
			    "create prop for target %d lun %d (LUN_PROP)",
			    target, lun);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		}
		lun64 = (int64_t)lun;
		if (mdi_prop_update_int64(*pip, LUN64_PROP,
		    lun64) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
			    "create prop for target %d (LUN64_PROP)",
			    target);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		}
		if (mdi_prop_update_string_array(*pip, "compatible",
		    compatible, ncompatible) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
			    "create prop for target %d lun %d (COMPATIBLE)",
			    target, lun);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		}
		if (sas_wwn && (mdi_prop_update_string(*pip,
		    SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
			    "create prop for target %d lun %d "
			    "(target-port)", target, lun);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		} else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
		    "sata-phy", phy) != DDI_PROP_SUCCESS)) {
			/*
			 * Direct attached SATA device without DeviceName
			 */
			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
			    "create prop for SAS target %d lun %d "
			    "(sata-phy)", target, lun);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		}
		mutex_enter(&mpt->m_mutex);

		/*
		 * Read SAS device page 0 for the target and then for its
		 * parent handle; the parent's WWN becomes the attached-port
		 * for expander-attached devices.
		 */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)ptgt->m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "parent device for handle %d", page_address);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		}

		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get"
			    "device info for handle %d", page_address);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		}

		mutex_exit(&mpt->m_mutex);

		/*
		 * If this device direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
			    pdev_sas_wwn);
		} else {
			/*
			 * Update the iport's attached-port to guid
			 */
			if (sas_wwn == 0) {
				(void) sprintf(wwn_str, "p%x", phy);
			} else {
				(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
			}
			if (ddi_prop_update_string(DDI_DEV_T_NONE,
			    pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas unable to create "
				    "property for iport target-port"
				    " %s (sas_wwn)",
				    wwn_str);
				mdi_rtn = MDI_FAILURE;
				goto virt_create_done;
			}

			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		/*
		 * For SATA devices behind a bridge, export the bridge's
		 * WWN so consumers can identify the native SATA device.
		 */
		if (IS_SATA_DEVICE(ptgt->m_deviceinfo)) {
			char	uabuf[SCSI_WWN_BUFLEN];

			if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas unable to format SATA bridge WWN");
				mdi_rtn = MDI_FAILURE;
				goto virt_create_done;
			}

			if (mdi_prop_update_string(*pip,
			    SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas unable to create SCSI bridge port "
				    "property for SATA device");
				mdi_rtn = MDI_FAILURE;
				goto virt_create_done;
			}
		}

		/*
		 * NOTE(review): attached_wwn_str is never assigned in this
		 * function, so the message below logs a NULL string; it
		 * probably should print pdev_wwn_str — confirm upstream.
		 */
		if (mdi_prop_update_string(*pip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for iport attached-port %s (sas_wwn)",
			    attached_wwn_str);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		}


		if (inq->inq_dtype == 0) {
			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
			/*
			 * set obp path for pathinfo
			 */
			(void) snprintf(component, MAXPATHLEN,
			    "disk@%s", lun_addr);

			if (mdi_pi_pathname_obp_set(*pip, component) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
				    "unable to set obp-path for object %s",
				    component);
				mdi_rtn = MDI_FAILURE;
				goto virt_create_done;
			}
		}

		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
		/* SATA/ATAPI devices can be power managed by sd. */
		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
			if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
			    "pm-capable", 1)) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas driver"
				    "failed to create pm-capable "
				    "property, target %d", target);
				mdi_rtn = MDI_FAILURE;
				goto virt_create_done;
			}
		}
		/*
		 * Create the phy-num property
		 */
		if (mdi_prop_update_int(*pip, "phy-num",
		    ptgt->m_phynum) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
			    "create phy-num property for target %d lun %d",
			    target, lun);
			mdi_rtn = MDI_FAILURE;
			goto virt_create_done;
		}
		NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
		mdi_rtn = mdi_pi_online(*pip, 0);
		if (mdi_rtn == MDI_NOT_SUPPORTED) {
			mdi_rtn = MDI_FAILURE;
		}
virt_create_done:
		/* On any failure, free the half-built path. */
		if (*pip && mdi_rtn != MDI_SUCCESS) {
			(void) mdi_pi_free(*pip, 0);
			*pip = NULL;
			*lun_dip = NULL;
		}
	}

	scsi_hba_nodename_compatible_free(nodename, compatible);
	if (lun_addr != NULL) {
		kmem_free(lun_addr, SCSI_MAXNAMELEN);
	}
	if (wwn_str != NULL) {
		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
	}
	if (component != NULL) {
		kmem_free(component, MAXPATHLEN);
	}

	return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
15757 15700
15758 15701 static int
15759 15702 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
15760 15703 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
15761 15704 {
15762 15705 int target;
15763 15706 int rval;
15764 15707 int ndi_rtn = NDI_FAILURE;
15765 15708 uint64_t be_sas_wwn;
15766 15709 char *nodename = NULL;
15767 15710 char **compatible = NULL;
15768 15711 int ncompatible = 0;
15769 15712 int instance = 0;
15770 15713 mptsas_t *mpt = DIP2MPT(pdip);
15771 15714 char *wwn_str = NULL;
15772 15715 char *component = NULL;
15773 15716 char *attached_wwn_str = NULL;
15774 15717 uint8_t phy = 0xFF;
15775 15718 uint64_t sas_wwn;
15776 15719 uint32_t devinfo;
15777 15720 uint16_t dev_hdl;
15778 15721 uint16_t pdev_hdl;
15779 15722 uint64_t pdev_sas_wwn;
15780 15723 uint64_t dev_sas_wwn;
15781 15724 uint32_t pdev_info;
15782 15725 uint8_t physport;
15783 15726 uint8_t phy_id;
15784 15727 uint32_t page_address;
15785 15728 uint16_t bay_num, enclosure, io_flags;
15786 15729 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
15787 15730 uint32_t dev_info;
15788 15731 int64_t lun64 = 0;
15789 15732
15790 15733 mutex_enter(&mpt->m_mutex);
15791 15734 target = ptgt->m_devhdl;
15792 15735 sas_wwn = ptgt->m_addr.mta_wwn;
15793 15736 devinfo = ptgt->m_deviceinfo;
15794 15737 phy = ptgt->m_phynum;
15795 15738 mutex_exit(&mpt->m_mutex);
15796 15739
15797 15740 /*
15798 15741 * generate compatible property with binding-set "mpt"
15799 15742 */
15800 15743 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
15801 15744 &nodename, &compatible, &ncompatible);
15802 15745
15803 15746 /*
15804 15747 * if nodename can't be determined then print a message and skip it
15805 15748 */
15806 15749 if (nodename == NULL) {
15807 15750 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
15808 15751 "for target %d lun %d", target, lun);
15809 15752 return (DDI_FAILURE);
15810 15753 }
15811 15754
15812 15755 ndi_rtn = ndi_devi_alloc(pdip, nodename,
15813 15756 DEVI_SID_NODEID, lun_dip);
15814 15757
15815 15758 /*
15816 15759 * if lun alloc success, set props
15817 15760 */
15818 15761 if (ndi_rtn == NDI_SUCCESS) {
15819 15762
15820 15763 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15821 15764 *lun_dip, LUN_PROP, lun) !=
15822 15765 DDI_PROP_SUCCESS) {
15823 15766 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15824 15767 "property for target %d lun %d (LUN_PROP)",
15825 15768 target, lun);
15826 15769 ndi_rtn = NDI_FAILURE;
15827 15770 goto phys_create_done;
15828 15771 }
15829 15772
15830 15773 lun64 = (int64_t)lun;
15831 15774 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
15832 15775 *lun_dip, LUN64_PROP, lun64) !=
15833 15776 DDI_PROP_SUCCESS) {
15834 15777 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15835 15778 "property for target %d lun64 %d (LUN64_PROP)",
15836 15779 target, lun);
15837 15780 ndi_rtn = NDI_FAILURE;
15838 15781 goto phys_create_done;
15839 15782 }
15840 15783 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
15841 15784 *lun_dip, "compatible", compatible, ncompatible)
15842 15785 != DDI_PROP_SUCCESS) {
15843 15786 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15844 15787 "property for target %d lun %d (COMPATIBLE)",
15845 15788 target, lun);
15846 15789 ndi_rtn = NDI_FAILURE;
15847 15790 goto phys_create_done;
15848 15791 }
15849 15792
15850 15793 /*
15851 15794 * We need the SAS WWN for non-multipath devices, so
15852 15795 * we'll use the same property as that multipathing
15853 15796 * devices need to present for MPAPI. If we don't have
15854 15797 * a WWN (e.g. parallel SCSI), don't create the prop.
15855 15798 */
15856 15799 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15857 15800 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15858 15801 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
15859 15802 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
15860 15803 != DDI_PROP_SUCCESS) {
15861 15804 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15862 15805 "create property for SAS target %d lun %d "
15863 15806 "(target-port)", target, lun);
15864 15807 ndi_rtn = NDI_FAILURE;
15865 15808 goto phys_create_done;
15866 15809 }
15867 15810
15868 15811 be_sas_wwn = BE_64(sas_wwn);
15869 15812 if (sas_wwn && ndi_prop_update_byte_array(
15870 15813 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
15871 15814 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
15872 15815 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15873 15816 "create property for SAS target %d lun %d "
15874 15817 "(port-wwn)", target, lun);
15875 15818 ndi_rtn = NDI_FAILURE;
15876 15819 goto phys_create_done;
15877 15820 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
15878 15821 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
15879 15822 DDI_PROP_SUCCESS)) {
15880 15823 /*
15881 15824 * Direct attached SATA device without DeviceName
15882 15825 */
15883 15826 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15884 15827 "create property for SAS target %d lun %d "
15885 15828 "(sata-phy)", target, lun);
15886 15829 ndi_rtn = NDI_FAILURE;
15887 15830 goto phys_create_done;
15888 15831 }
15889 15832
15890 15833 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15891 15834 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
15892 15835 mptsas_log(mpt, CE_WARN, "mptsas unable to"
15893 15836 "create property for SAS target %d lun %d"
15894 15837 " (SAS_PROP)", target, lun);
15895 15838 ndi_rtn = NDI_FAILURE;
15896 15839 goto phys_create_done;
15897 15840 }
15898 15841 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
15899 15842 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
15900 15843 mptsas_log(mpt, CE_WARN, "mptsas unable "
15901 15844 "to create guid property for target %d "
15902 15845 "lun %d", target, lun);
15903 15846 ndi_rtn = NDI_FAILURE;
15904 15847 goto phys_create_done;
15905 15848 }
15906 15849
15907 15850 /*
15908 15851 * The following code is to set properties for SM-HBA support,
15909 15852 * it doesn't apply to RAID volumes
15910 15853 */
15911 15854 if (ptgt->m_addr.mta_phymask == 0)
15912 15855 goto phys_raid_lun;
15913 15856
15914 15857 mutex_enter(&mpt->m_mutex);
15915 15858
15916 15859 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15917 15860 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15918 15861 (uint32_t)ptgt->m_devhdl;
15919 15862 rval = mptsas_get_sas_device_page0(mpt, page_address,
15920 15863 &dev_hdl, &dev_sas_wwn, &dev_info,
15921 15864 &physport, &phy_id, &pdev_hdl,
15922 15865 &bay_num, &enclosure, &io_flags);
15923 15866 if (rval != DDI_SUCCESS) {
15924 15867 mutex_exit(&mpt->m_mutex);
15925 15868 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15926 15869 "parent device for handle %d.", page_address);
15927 15870 ndi_rtn = NDI_FAILURE;
15928 15871 goto phys_create_done;
15929 15872 }
15930 15873
15931 15874 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15932 15875 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15933 15876 rval = mptsas_get_sas_device_page0(mpt, page_address,
15934 15877 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15935 15878 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15936 15879 if (rval != DDI_SUCCESS) {
15937 15880 mutex_exit(&mpt->m_mutex);
15938 15881 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15939 15882 "device for handle %d.", page_address);
15940 15883 ndi_rtn = NDI_FAILURE;
15941 15884 goto phys_create_done;
15942 15885 }
15943 15886
15944 15887 mutex_exit(&mpt->m_mutex);
15945 15888
15946 15889 /*
15947 15890 * If this device direct attached to the controller
15948 15891 * set the attached-port to the base wwid
15949 15892 */
15950 15893 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15951 15894 != DEVINFO_DIRECT_ATTACHED) {
15952 15895 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15953 15896 pdev_sas_wwn);
15954 15897 } else {
15955 15898 /*
15956 15899 * Update the iport's attached-port to guid
15957 15900 */
15958 15901 if (sas_wwn == 0) {
15959 15902 (void) sprintf(wwn_str, "p%x", phy);
15960 15903 } else {
15961 15904 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15962 15905 }
15963 15906 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15964 15907 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15965 15908 DDI_PROP_SUCCESS) {
15966 15909 mptsas_log(mpt, CE_WARN,
15967 15910 "mptsas unable to create "
15968 15911 "property for iport target-port"
15969 15912 " %s (sas_wwn)",
15970 15913 wwn_str);
15971 15914 ndi_rtn = NDI_FAILURE;
15972 15915 goto phys_create_done;
15973 15916 }
15974 15917
15975 15918 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15976 15919 mpt->un.m_base_wwid);
15977 15920 }
15978 15921
15979 15922 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15980 15923 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15981 15924 DDI_PROP_SUCCESS) {
15982 15925 mptsas_log(mpt, CE_WARN,
15983 15926 "mptsas unable to create "
15984 15927 "property for iport attached-port %s (sas_wwn)",
15985 15928 attached_wwn_str);
15986 15929 ndi_rtn = NDI_FAILURE;
15987 15930 goto phys_create_done;
15988 15931 }
15989 15932
15990 15933 if (IS_SATA_DEVICE(dev_info)) {
15991 15934 char uabuf[SCSI_WWN_BUFLEN];
15992 15935
15993 15936 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15994 15937 *lun_dip, MPTSAS_VARIANT, "sata") !=
15995 15938 DDI_PROP_SUCCESS) {
15996 15939 mptsas_log(mpt, CE_WARN,
15997 15940 "mptsas unable to create "
15998 15941 "property for device variant ");
15999 15942 ndi_rtn = NDI_FAILURE;
16000 15943 goto phys_create_done;
16001 15944 }
16002 15945
16003 15946 if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
16004 15947 mptsas_log(mpt, CE_WARN,
16005 15948 "mptsas unable to format SATA bridge WWN");
16006 15949 ndi_rtn = NDI_FAILURE;
16007 15950 goto phys_create_done;
16008 15951 }
16009 15952
16010 15953 if (ndi_prop_update_string(DDI_DEV_T_NONE, *lun_dip,
16011 15954 SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) !=
16012 15955 DDI_PROP_SUCCESS) {
16013 15956 mptsas_log(mpt, CE_WARN,
16014 15957 "mptsas unable to create SCSI bridge port "
16015 15958 "property for SATA device");
16016 15959 ndi_rtn = NDI_FAILURE;
16017 15960 goto phys_create_done;
16018 15961 }
16019 15962 }
16020 15963
16021 15964 if (IS_ATAPI_DEVICE(dev_info)) {
16022 15965 if (ndi_prop_update_string(DDI_DEV_T_NONE,
16023 15966 *lun_dip, MPTSAS_VARIANT, "atapi") !=
16024 15967 DDI_PROP_SUCCESS) {
16025 15968 mptsas_log(mpt, CE_WARN,
16026 15969 "mptsas unable to create "
16027 15970 "property for device variant ");
16028 15971 ndi_rtn = NDI_FAILURE;
16029 15972 goto phys_create_done;
16030 15973 }
16031 15974 }
16032 15975
16033 15976 phys_raid_lun:
16034 15977 /*
16035 15978 * if this is a SAS controller, and the target is a SATA
16036 15979 * drive, set the 'pm-capable' property for sd and if on
16037 15980 * an OPL platform, also check if this is an ATAPI
16038 15981 * device.
16039 15982 */
16040 15983 instance = ddi_get_instance(mpt->m_dip);
16041 15984 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
16042 15985 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
16043 15986 NDBG2(("mptsas%d: creating pm-capable property, "
16044 15987 "target %d", instance, target));
16045 15988
16046 15989 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
16047 15990 *lun_dip, "pm-capable", 1)) !=
16048 15991 DDI_PROP_SUCCESS) {
16049 15992 mptsas_log(mpt, CE_WARN, "mptsas "
16050 15993 "failed to create pm-capable "
16051 15994 "property, target %d", target);
16052 15995 ndi_rtn = NDI_FAILURE;
16053 15996 goto phys_create_done;
16054 15997 }
16055 15998
16056 15999 }
16057 16000
16058 16001 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
16059 16002 /*
16060 16003 * add 'obp-path' properties for devinfo
16061 16004 */
16062 16005 bzero(wwn_str, sizeof (wwn_str));
16063 16006 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
16064 16007 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
16065 16008 if (guid) {
16066 16009 (void) snprintf(component, MAXPATHLEN,
16067 16010 "disk@w%s,%x", wwn_str, lun);
16068 16011 } else {
16069 16012 (void) snprintf(component, MAXPATHLEN,
16070 16013 "disk@p%x,%x", phy, lun);
16071 16014 }
16072 16015 if (ddi_pathname_obp_set(*lun_dip, component)
16073 16016 != DDI_SUCCESS) {
16074 16017 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
16075 16018 "unable to set obp-path for SAS "
16076 16019 "object %s", component);
16077 16020 ndi_rtn = NDI_FAILURE;
16078 16021 goto phys_create_done;
16079 16022 }
16080 16023 }
16081 16024 /*
16082 16025 * Create the phy-num property for non-raid disk
16083 16026 */
16084 16027 if (ptgt->m_addr.mta_phymask != 0) {
16085 16028 if (ndi_prop_update_int(DDI_DEV_T_NONE,
16086 16029 *lun_dip, "phy-num", ptgt->m_phynum) !=
16087 16030 DDI_PROP_SUCCESS) {
16088 16031 mptsas_log(mpt, CE_WARN, "mptsas driver "
16089 16032 "failed to create phy-num property for "
16090 16033 "target %d", target);
16091 16034 ndi_rtn = NDI_FAILURE;
16092 16035 goto phys_create_done;
16093 16036 }
16094 16037 }
16095 16038 phys_create_done:
16096 16039 /*
16097 16040 * If props were setup ok, online the lun
16098 16041 */
16099 16042 if (ndi_rtn == NDI_SUCCESS) {
16100 16043 /*
16101 16044 * Try to online the new node
16102 16045 */
16103 16046 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
16104 16047 }
16105 16048
16106 16049 /*
16107 16050 * If success set rtn flag, else unwire alloc'd lun
16108 16051 */
16109 16052 if (ndi_rtn != NDI_SUCCESS) {
16110 16053 NDBG12(("mptsas driver unable to online "
16111 16054 "target %d lun %d", target, lun));
16112 16055 ndi_prop_remove_all(*lun_dip);
16113 16056 (void) ndi_devi_free(*lun_dip);
16114 16057 *lun_dip = NULL;
16115 16058 }
16116 16059 }
16117 16060
16118 16061 scsi_hba_nodename_compatible_free(nodename, compatible);
16119 16062
16120 16063 if (wwn_str != NULL) {
16121 16064 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
16122 16065 }
16123 16066 if (component != NULL) {
16124 16067 kmem_free(component, MAXPATHLEN);
16125 16068 }
16126 16069
16127 16070
16128 16071 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
16129 16072 }
16130 16073
16131 16074 static int
16132 16075 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
16133 16076 {
16134 16077 mptsas_t *mpt = DIP2MPT(pdip);
16135 16078 struct smp_device smp_sd;
16136 16079
16137 16080 /* XXX An HBA driver should not be allocating an smp_device. */
16138 16081 bzero(&smp_sd, sizeof (struct smp_device));
16139 16082 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
16140 16083 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
16141 16084
16142 16085 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
16143 16086 return (NDI_FAILURE);
16144 16087 return (NDI_SUCCESS);
16145 16088 }
16146 16089
16147 16090 static int
16148 16091 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
16149 16092 {
16150 16093 mptsas_t *mpt = DIP2MPT(pdip);
16151 16094 mptsas_smp_t *psmp = NULL;
16152 16095 int rval;
16153 16096 int phymask;
16154 16097
16155 16098 /*
16156 16099 * Get the physical port associated to the iport
16157 16100 * PHYMASK TODO
16158 16101 */
16159 16102 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
16160 16103 "phymask", 0);
16161 16104 /*
16162 16105 * Find the smp node in hash table with specified sas address and
16163 16106 * physical port
16164 16107 */
16165 16108 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
16166 16109 if (psmp == NULL) {
16167 16110 return (DDI_FAILURE);
16168 16111 }
16169 16112
16170 16113 rval = mptsas_online_smp(pdip, psmp, smp_dip);
16171 16114
16172 16115 return (rval);
16173 16116 }
16174 16117
16175 16118 static int
mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
    dev_info_t **smp_dip)
{
	/*
	 * Create and online an "smp" child of the iport pdip for the
	 * expander described by smp_node.  On success *smp_dip points at
	 * the (possibly pre-existing) child node and DDI_SUCCESS is
	 * returned; on any failure the partially constructed node is
	 * freed and DDI_FAILURE is returned.
	 */
	char wwn_str[MPTSAS_WWN_STRLEN];
	char attached_wwn_str[MPTSAS_WWN_STRLEN];
	int ndi_rtn = NDI_FAILURE;
	int rval = 0;
	mptsas_smp_t dev_info;
	uint32_t page_address;
	mptsas_t *mpt = DIP2MPT(pdip);
	uint16_t dev_hdl;
	uint64_t sas_wwn;
	uint64_t smp_sas_wwn;
	uint8_t physport;
	uint8_t phy_id;
	uint16_t pdev_hdl;
	uint8_t numphys = 0;
	uint16_t i = 0;
	char phymask[MPTSAS_MAX_PHYS];
	char *iport = NULL;
	mptsas_phymask_t phy_mask = 0;
	uint16_t attached_devhdl;
	uint16_t bay_num, enclosure, io_flags;

	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);

	/*
	 * Probe smp device, prevent the node of removed device from being
	 * configured successfully
	 */
	if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* If the child already exists there is nothing more to do. */
	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
		return (DDI_SUCCESS);
	}

	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {
		/*
		 * Set the flavor of the child to be SMP flavored
		 */
		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SMP_WWN, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp device %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		/* The target-port property uses the "w<wwn>" form. */
		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for iport target-port %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);

		/* Expander page0 supplies the parent device handle. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &dev_info);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to get expander "
			    "parent device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Device page0 for the parent yields its WWN (sas_wwn). */
		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_pdevhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo, &physport,
		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Device page0 for the expander itself (deviceinfo). */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure,
		    &io_flags);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		/*
		 * If this smp direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    sas_wwn);
		} else {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SMP %s (SMP_PROP) ",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/*
		 * check the smp to see whether it direct
		 * attached to the controller
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			goto smp_create_done;
		}
		/* An iport with num-phys already set needs no reconfig. */
		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
		if (numphys > 0) {
			goto smp_create_done;
		}
		/*
		 * this iport is an old iport, we need to
		 * reconfig the props for it.
		 */
		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_VIRTUAL_PORT, 0) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);
		numphys = 0;
		/*
		 * The iport's unit address is its phymask in hex; match
		 * it against the per-phy table to recover the mask.
		 */
		iport = ddi_get_name_addr(pdip);
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			bzero(phymask, sizeof (phymask));
			(void) sprintf(phymask,
			    "%x", mpt->m_phy_info[i].phy_mask);
			if (strcmp(phymask, iport) == 0) {
				phy_mask = mpt->m_phy_info[i].phy_mask;
				break;
			}
		}

		/* Count the phys that belong to this iport. */
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			if ((phy_mask >> i) & 0x01) {
				numphys++;
			}
		}
		/*
		 * Update PHY info for smhba
		 */
		if (mptsas_smhba_phy_init(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas phy update "
			    "failed");
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
		    &attached_devhdl);

		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_NUM_PHYS, numphys) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_NUM_PHYS);
			mptsas_log(mpt, CE_WARN, "mptsas update "
			    "num phys props failed");
			goto smp_create_done;
		}
		/*
		 * Add parent's props for SMHBA support
		 */
		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    SCSI_ADDR_PROP_ATTACHED_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas update iport"
			    "attached-port failed");
			goto smp_create_done;
		}

smp_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas unable to online "
			    "SMP target %s", wwn_str));
			ndi_prop_remove_all(*smp_dip);
			(void) ndi_devi_free(*smp_dip);
		}
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
16428 16371
/*
 * smp transport routine: SMP framework entry point.  Translate the
 * generic smp_pkt into an MPI2 SMP passthrough request, issue it via
 * mptsas_do_passthru(), and map IOC/SAS status codes onto errnos in
 * smp_pkt_reason.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t wwn;
	Mpi2SmpPassthroughRequest_t req;
	Mpi2SmpPassthroughReply_t rep;
	uint32_t direction = 0;
	mptsas_t *mpt;
	int ret;
	uint64_t tmp64;

	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;	/* address by SASAddress, not port */
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* The request size must fit the 16-bit RequestDataLength field. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * The "- 4" drops the trailing CRC dword, which the IOC handles
	 * itself (presumably per MPI2; the same adjustment is applied to
	 * the request/response sizes passed to mptsas_do_passthru below).
	 * NOTE(review): sizes < 4 would underflow here -- confirm the SMP
	 * framework guarantees a minimum frame size.
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* Map the IOC status onto an errno for the framework. */
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
16513 16456
/*
 * Find the target attached at the given PHY on the iport identified by
 * phymask.  If we didn't get a match in the cached target hash, we need
 * to get sas page0 for each device until we get a match.  If failed,
 * return NULL.
 */
static mptsas_target_t *
mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
{
	int i, j = 0;
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *ptgt = NULL;

	/*
	 * PHY named device must be direct attached and attaches to
	 * narrow port, if the iport is not parent of the device which
	 * we are looking for.
	 */
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		if ((1 << i) & phymask)
			j++;
	}

	/* More than one phy set means a wide port: no PHY-named device. */
	if (j > 1)
		return (NULL);

	/*
	 * Must be a narrow port and single device attached to the narrow port
	 * So the physical port num of device which is equal to the iport's
	 * port num is the device what we are looking for.
	 */

	if (mpt->m_phy_info[phy].phy_mask != phymask)
		return (NULL);

	mutex_enter(&mpt->m_mutex);

	/* Fast path: search the cached targets by phy number. */
	ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
	    &phy);
	if (ptgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (ptgt);
	}

	/* The full firmware walk is already complete; nothing new to find. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;
	for (; ; ) {
		ptgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC) ||
		    (rval == DEV_INFO_FAIL_GUID)) {
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember how far the walk has progressed. */
		mpt->m_dev_handle = cur_handle;

		/* A direct-attached device has no WWN; match on phy number. */
		if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (ptgt);
}
16590 16533
/*
 * The ptgt->m_addr.mta_wwn contains the wwid for each disk.
 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
 * If we didn't get a match, we need to get sas page0 for each device
 * until we get a match.
 * If failed, return NULL
 */
static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_target_t *tmp_tgt = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	/* Fast path: the target is already cached in the hash. */
	tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
	if (tmp_tgt != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	if (phymask == 0) {
		/*
		 * It's IR volume
		 */
		rval = mptsas_get_raid_info(mpt);
		if (rval) {
			/* RAID info was refreshed; retry the lookup once. */
			tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
		}
		mutex_exit(&mpt->m_mutex);
		return (tmp_tgt);
	}

	/* The full firmware walk is already complete; nothing new to find. */
	if (mpt->m_done_traverse_dev) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_dev_handle;
	for (;;) {
		tmp_tgt = NULL;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &cur_handle, &tmp_tgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC) ||
		    (rval == DEV_INFO_FAIL_GUID)) {
			tmp_tgt = NULL;
			break;
		}
		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
		    (rval == DEV_INFO_PHYS_DISK)) {
			continue;
		}
		/* Remember how far the walk has progressed. */
		mpt->m_dev_handle = cur_handle;
		if ((tmp_tgt->m_addr.mta_wwn) &&
		    (tmp_tgt->m_addr.mta_wwn == wwid) &&
		    (tmp_tgt->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (tmp_tgt);
}
16662 16605
/*
 * Look up the SMP target (expander) with the given wwid/phymask.  If it
 * is not in the cached hash, walk expander page0 via GET_NEXT_HNDL,
 * caching each node found, until a match turns up.  Returns NULL when
 * the walk is already complete or page0 retrieval fails.
 */
static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
{
	int rval = 0;
	uint16_t cur_handle;
	uint32_t page_address;
	mptsas_smp_t smp_node, *psmp = NULL;
	mptsas_target_addr_t addr;

	addr.mta_wwn = wwid;
	addr.mta_phymask = phymask;
	mutex_enter(&mpt->m_mutex);
	/* Fast path: already cached in the SMP target hash. */
	psmp = refhash_lookup(mpt->m_smp_targets, &addr);
	if (psmp != NULL) {
		mutex_exit(&mpt->m_mutex);
		return (psmp);
	}

	/* The full expander walk is already complete; nothing new to find. */
	if (mpt->m_done_traverse_smp) {
		mutex_exit(&mpt->m_mutex);
		return (NULL);
	}

	/* If didn't get a match, come here */
	cur_handle = mpt->m_smp_devhdl;
	for (;;) {
		psmp = NULL;
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &smp_node);
		if (rval != DDI_SUCCESS) {
			break;
		}
		/* Remember how far the walk has progressed. */
		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
		psmp = mptsas_smp_alloc(mpt, &smp_node);
		ASSERT(psmp);
		if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
		    (psmp->m_addr.mta_phymask == phymask)) {
			break;
		}
	}

	mutex_exit(&mpt->m_mutex);
	return (psmp);
}
16709 16652
16710 16653 mptsas_target_t *
16711 16654 mptsas_tgt_alloc(refhash_t *refhash, uint16_t devhdl, uint64_t wwid,
16712 16655 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
16713 16656 {
16714 16657 mptsas_target_t *tmp_tgt = NULL;
16715 16658 mptsas_target_addr_t addr;
16716 16659
16717 16660 addr.mta_wwn = wwid;
16718 16661 addr.mta_phymask = phymask;
16719 16662 tmp_tgt = refhash_lookup(refhash, &addr);
16720 16663 if (tmp_tgt != NULL) {
16721 16664 NDBG20(("Hash item already exist"));
16722 16665 tmp_tgt->m_deviceinfo = devinfo;
16723 16666 tmp_tgt->m_devhdl = devhdl; /* XXX - duplicate? */
16724 16667 return (tmp_tgt);
16725 16668 }
16726 16669 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
16727 16670 if (tmp_tgt == NULL) {
16728 16671 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
16729 16672 return (NULL);
16730 16673 }
16731 16674 tmp_tgt->m_devhdl = devhdl;
16732 16675 tmp_tgt->m_addr.mta_wwn = wwid;
16733 16676 tmp_tgt->m_deviceinfo = devinfo;
16734 16677 tmp_tgt->m_addr.mta_phymask = phymask;
16735 16678 tmp_tgt->m_phynum = phynum;
16736 16679 /* Initialized the tgt structure */
16737 16680 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
16738 16681 tmp_tgt->m_qfull_retry_interval =
16739 16682 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
16740 16683 tmp_tgt->m_t_throttle = MAX_THROTTLE;
16741 16684 TAILQ_INIT(&tmp_tgt->m_active_cmdq);
16742 16685
16743 16686 refhash_insert(refhash, tmp_tgt);
16744 16687
16745 16688 return (tmp_tgt);
16746 16689 }
16747 16690
16748 16691 static void
16749 16692 mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
16750 16693 {
16751 16694 dst->m_devhdl = src->m_devhdl;
16752 16695 dst->m_deviceinfo = src->m_deviceinfo;
16753 16696 dst->m_pdevhdl = src->m_pdevhdl;
16754 16697 dst->m_pdevinfo = src->m_pdevinfo;
16755 16698 }
16756 16699
16757 16700 static mptsas_smp_t *
16758 16701 mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
16759 16702 {
16760 16703 mptsas_target_addr_t addr;
16761 16704 mptsas_smp_t *ret_data;
16762 16705
16763 16706 addr.mta_wwn = data->m_addr.mta_wwn;
16764 16707 addr.mta_phymask = data->m_addr.mta_phymask;
16765 16708 ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
16766 16709 /*
16767 16710 * If there's already a matching SMP target, update its fields
16768 16711 * in place. Since the address is not changing, it's safe to do
16769 16712 * this. We cannot just bcopy() here because the structure we've
16770 16713 * been given has invalid hash links.
16771 16714 */
16772 16715 if (ret_data != NULL) {
16773 16716 mptsas_smp_target_copy(data, ret_data);
16774 16717 return (ret_data);
16775 16718 }
16776 16719
16777 16720 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
16778 16721 bcopy(data, ret_data, sizeof (mptsas_smp_t));
16779 16722 refhash_insert(mpt->m_smp_targets, ret_data);
16780 16723 return (ret_data);
16781 16724 }
16782 16725
/*
 * Functions for SGPIO LED support
 */
static dev_info_t *
mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
{
	/*
	 * Map a dev_t back to its dev_info node and report the node's
	 * "phymask" property (0 when absent) through *phymask.  Returns
	 * NULL if the dev_t cannot be resolved.
	 */
	dev_info_t *dip;
	int prop;
	dip = e_ddi_hold_devi_by_dev(dev, 0);
	if (dip == NULL)
		return (dip);
	prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "phymask", 0);
	*phymask = (mptsas_phymask_t)prop;
	/*
	 * NOTE(review): the hold taken above is dropped before the dip
	 * is returned, so the caller receives an unheld pointer --
	 * confirm callers only use it while the device cannot detach.
	 */
	ddi_release_devi(dip);
	return (dip);
}
16800 16743 static mptsas_target_t *
16801 16744 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16802 16745 {
16803 16746 uint8_t phynum;
16804 16747 uint64_t wwn;
16805 16748 int lun;
16806 16749 mptsas_target_t *ptgt = NULL;
16807 16750
16808 16751 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16809 16752 return (NULL);
16810 16753 }
16811 16754 if (addr[0] == 'w') {
16812 16755 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16813 16756 } else {
16814 16757 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16815 16758 }
16816 16759 return (ptgt);
16817 16760 }
16818 16761
/*
 * Push the driver's cached LED bits for one enclosure slot out to the
 * controller via an SEP WRITE_STATUS request.  Returns 0 or an errno
 * from mptsas_send_sep().
 */
static int
mptsas_flush_led_status(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx)
{
	uint32_t slotstatus = 0;

	ASSERT3U(idx, <, mep->me_nslots);

	/* Build an MPI2 Slot Status based on our view of the world */
	if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
		slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
	if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
		slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
	if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
		slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;

	/* Write it to the controller */
	NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
	    slotstatus, idx + mep->me_fslot));
	return (mptsas_send_sep(mpt, mep, idx, &slotstatus,
	    MPI2_SEP_REQ_ACTION_WRITE_STATUS));
}
16840 16783
/*
 * send sep request, use enclosure/slot addressing
 *
 * Issue a SCSI Enclosure Processor request (action "act") for slot idx
 * of enclosure mep.  For WRITE_STATUS, *status supplies the new slot
 * status; for other actions the current status is returned in *status.
 * Returns 0 on success or an errno.  Caller must hold m_mutex.
 */
static int
mptsas_send_sep(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx,
    uint32_t *status, uint8_t act)
{
	Mpi2SepRequest_t req;
	Mpi2SepReply_t rep;
	int ret;
	uint16_t enctype;
	uint16_t slot;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Look through the enclosures and make sure that this enclosure is
	 * something that is directly attached device. If we didn't find an
	 * enclosure for this device, don't send the ioctl.
	 */
	enctype = mep->me_flags & MPI2_SAS_ENCLS0_FLAGS_MNG_MASK;
	if (enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES &&
	    enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO &&
	    enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO) {
		return (ENOTTY);
	}
	/* Slot numbers are offset by the enclosure's first-slot number. */
	slot = idx + mep->me_fslot;

	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));

	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	req.Action = act;
	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	req.EnclosureHandle = LE_16(mep->me_enchdl);
	req.Slot = LE_16(slot);
	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		req.SlotStatus = LE_32(*status);
	}
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
	if (ret != 0) {
		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
		    "Processor Request message error %d", ret);
		return (ret);
	}
	/* do passthrough success, check the ioc status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
		    "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
		    LE_32(rep.IOCLogInfo));
		/* Map the MPI2 IOC status onto a reasonable errno. */
		switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_INVALID_FUNCTION:
		case MPI2_IOCSTATUS_INVALID_VPID:
		case MPI2_IOCSTATUS_INVALID_FIELD:
		case MPI2_IOCSTATUS_INVALID_STATE:
		case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
			return (EINVAL);
		case MPI2_IOCSTATUS_BUSY:
			return (EBUSY);
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
			return (EAGAIN);
		case MPI2_IOCSTATUS_INVALID_SGL:
		case MPI2_IOCSTATUS_INTERNAL_ERROR:
		case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		default:
			return (EIO);
		}
	}
	/* For read-style actions, hand the current slot status back. */
	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		*status = LE_32(rep.SlotStatus);
	}

	return (0);
}
16921 16864
16922 16865 int
16923 16866 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
16924 16867 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
16925 16868 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
16926 16869 {
16927 16870 ddi_dma_cookie_t new_cookie;
16928 16871 size_t alloc_len;
16929 16872 uint_t ncookie;
16930 16873
16931 16874 if (cookiep == NULL)
16932 16875 cookiep = &new_cookie;
16933 16876
16934 16877 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
16935 16878 NULL, dma_hdp) != DDI_SUCCESS) {
16936 16879 return (FALSE);
16937 16880 }
16938 16881
16939 16882 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
16940 16883 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
16941 16884 acc_hdp) != DDI_SUCCESS) {
16942 16885 ddi_dma_free_handle(dma_hdp);
16943 16886 *dma_hdp = NULL;
16944 16887 return (FALSE);
16945 16888 }
16946 16889
16947 16890 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
16948 16891 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
16949 16892 cookiep, &ncookie) != DDI_DMA_MAPPED) {
16950 16893 (void) ddi_dma_mem_free(acc_hdp);
16951 16894 ddi_dma_free_handle(dma_hdp);
16952 16895 *dma_hdp = NULL;
16953 16896 return (FALSE);
16954 16897 }
16955 16898
16956 16899 return (TRUE);
16957 16900 }
16958 16901
16959 16902 void
16960 16903 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
16961 16904 {
16962 16905 if (*dma_hdp == NULL)
16963 16906 return;
16964 16907
16965 16908 (void) ddi_dma_unbind_handle(*dma_hdp);
16966 16909 (void) ddi_dma_mem_free(acc_hdp);
16967 16910 ddi_dma_free_handle(dma_hdp);
16968 16911 *dma_hdp = NULL;
16969 16912 }
↓ open down ↓ |
1780 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX