Print this page
First pass at 4310
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
+++ new/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25 25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 26 * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27 27 * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
28 28 */
29 29
30 30 /*
31 31 * Copyright (c) 2000 to 2010, LSI Corporation.
32 32 * All rights reserved.
33 33 *
34 34 * Redistribution and use in source and binary forms of all code within
35 35 * this file that is exclusively owned by LSI, with or without
36 36 * modification, is permitted provided that, in addition to the CDDL 1.0
37 37 * License requirements, the following conditions are met:
38 38 *
39 39 * Neither the name of the author nor the names of its contributors may be
40 40 * used to endorse or promote products derived from this software without
41 41 * specific prior written permission.
42 42 *
43 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
46 46 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
47 47 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
48 48 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
49 49 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
50 50 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
51 51 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
52 52 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53 53 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54 54 * DAMAGE.
55 55 */
56 56
57 57 /*
58 58 * mptsas - This is a driver based on LSI Logic's MPT2.0/2.5 interface.
59 59 *
60 60 */
61 61
62 62 #if defined(lint) || defined(DEBUG)
63 63 #define MPTSAS_DEBUG
64 64 #endif
65 65
66 66 /*
67 67 * standard header files.
68 68 */
69 69 #include <sys/note.h>
70 70 #include <sys/scsi/scsi.h>
71 71 #include <sys/pci.h>
72 72 #include <sys/file.h>
73 73 #include <sys/policy.h>
74 74 #include <sys/model.h>
75 75 #include <sys/sysevent.h>
76 76 #include <sys/sysevent/eventdefs.h>
77 77 #include <sys/sysevent/dr.h>
78 78 #include <sys/sata/sata_defs.h>
79 79 #include <sys/scsi/generic/sas.h>
80 80 #include <sys/scsi/impl/scsi_sas.h>
81 81
82 82 #pragma pack(1)
83 83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
84 84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
85 85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
86 86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
87 87 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
88 88 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
89 89 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
90 90 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
91 91 #pragma pack()
92 92
93 93 /*
94 94 * private header files.
95 95 *
96 96 */
97 97 #include <sys/scsi/impl/scsi_reset_notify.h>
98 98 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
99 99 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
100 100 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
101 101 #include <sys/scsi/adapters/mpt_sas/mptsas_hash.h>
102 102 #include <sys/raidioctl.h>
103 103
104 104 #include <sys/fs/dv_node.h> /* devfs_clean */
105 105
106 106 /*
107 107 * FMA header files
108 108 */
109 109 #include <sys/ddifm.h>
110 110 #include <sys/fm/protocol.h>
111 111 #include <sys/fm/util.h>
112 112 #include <sys/fm/io/ddi.h>
113 113
114 114 /*
115 115 * autoconfiguration data and routines.
116 116 */
117 117 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
118 118 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
119 119 static int mptsas_power(dev_info_t *dip, int component, int level);
120 120
121 121 /*
122 122 * cb_ops function
123 123 */
124 124 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
125 125 cred_t *credp, int *rval);
126 126 #ifdef __sparc
127 127 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
128 128 #else /* __sparc */
129 129 static int mptsas_quiesce(dev_info_t *devi);
130 130 #endif /* __sparc */
131 131
132 132 /*
133 133 * Resource initilaization for hardware
134 134 */
135 135 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
136 136 static void mptsas_disable_bus_master(mptsas_t *mpt);
137 137 static void mptsas_hba_fini(mptsas_t *mpt);
138 138 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
139 139 static int mptsas_hba_setup(mptsas_t *mpt);
140 140 static void mptsas_hba_teardown(mptsas_t *mpt);
141 141 static int mptsas_config_space_init(mptsas_t *mpt);
142 142 static void mptsas_config_space_fini(mptsas_t *mpt);
143 143 static void mptsas_iport_register(mptsas_t *mpt);
144 144 static int mptsas_smp_setup(mptsas_t *mpt);
145 145 static void mptsas_smp_teardown(mptsas_t *mpt);
146 146 static int mptsas_cache_create(mptsas_t *mpt);
147 147 static void mptsas_cache_destroy(mptsas_t *mpt);
148 148 static int mptsas_alloc_request_frames(mptsas_t *mpt);
149 149 static int mptsas_alloc_sense_bufs(mptsas_t *mpt);
150 150 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
151 151 static int mptsas_alloc_free_queue(mptsas_t *mpt);
152 152 static int mptsas_alloc_post_queue(mptsas_t *mpt);
153 153 static void mptsas_alloc_reply_args(mptsas_t *mpt);
154 154 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
155 155 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
156 156 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
157 157
158 158 /*
159 159 * SCSA function prototypes
160 160 */
161 161 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
162 162 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
163 163 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
164 164 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
165 165 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
166 166 int tgtonly);
167 167 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
168 168 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
169 169 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
170 170 int tgtlen, int flags, int (*callback)(), caddr_t arg);
171 171 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
172 172 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
173 173 struct scsi_pkt *pkt);
174 174 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
175 175 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
176 176 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
177 177 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
178 178 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
179 179 void (*callback)(caddr_t), caddr_t arg);
180 180 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
181 181 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
182 182 static int mptsas_scsi_quiesce(dev_info_t *dip);
183 183 static int mptsas_scsi_unquiesce(dev_info_t *dip);
184 184 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
185 185 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
186 186
187 187 /*
188 188 * SMP functions
189 189 */
190 190 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
191 191
192 192 /*
193 193 * internal function prototypes.
194 194 */
195 195 static void mptsas_list_add(mptsas_t *mpt);
196 196 static void mptsas_list_del(mptsas_t *mpt);
197 197
198 198 static int mptsas_quiesce_bus(mptsas_t *mpt);
199 199 static int mptsas_unquiesce_bus(mptsas_t *mpt);
200 200
201 201 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
202 202 static void mptsas_free_handshake_msg(mptsas_t *mpt);
203 203
204 204 static void mptsas_ncmds_checkdrain(void *arg);
205 205
206 206 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
207 207 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
208 208 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
209 209 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
210 210
211 211 static int mptsas_do_detach(dev_info_t *dev);
212 212 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
213 213 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
214 214 struct scsi_pkt *pkt);
215 215 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
216 216
217 217 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
218 218 static void mptsas_handle_event(void *args);
219 219 static int mptsas_handle_event_sync(void *args);
220 220 static void mptsas_handle_dr(void *args);
221 221 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
222 222 dev_info_t *pdip);
223 223
224 224 static void mptsas_restart_cmd(void *);
225 225
226 226 static void mptsas_flush_hba(mptsas_t *mpt);
227 227 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
228 228 uint8_t tasktype);
229 229 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
230 230 uchar_t reason, uint_t stat);
231 231
232 232 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
233 233 static void mptsas_process_intr(mptsas_t *mpt,
234 234 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
235 235 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
236 236 pMpi2ReplyDescriptorsUnion_t reply_desc);
237 237 static void mptsas_handle_address_reply(mptsas_t *mpt,
238 238 pMpi2ReplyDescriptorsUnion_t reply_desc);
239 239 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
240 240 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
241 241 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
242 242
243 243 static void mptsas_watch(void *arg);
244 244 static void mptsas_watchsubr(mptsas_t *mpt);
245 245 static void mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt);
246 246
247 247 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
248 248 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
249 249 uint8_t *data, uint32_t request_size, uint32_t reply_size,
250 250 uint32_t data_size, uint32_t direction, uint8_t *dataout,
251 251 uint32_t dataout_size, short timeout, int mode);
252 252 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
253 253
254 254 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
255 255 uint32_t unique_id);
256 256 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
257 257 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
258 258 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
259 259 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
260 260 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
261 261 uint32_t diag_type);
262 262 static int mptsas_diag_register(mptsas_t *mpt,
263 263 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
264 264 static int mptsas_diag_unregister(mptsas_t *mpt,
265 265 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
266 266 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
267 267 uint32_t *return_code);
268 268 static int mptsas_diag_read_buffer(mptsas_t *mpt,
269 269 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
270 270 uint32_t *return_code, int ioctl_mode);
271 271 static int mptsas_diag_release(mptsas_t *mpt,
272 272 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
273 273 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
274 274 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
275 275 int ioctl_mode);
276 276 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
277 277 int mode);
278 278
279 279 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
280 280 int cmdlen, int tgtlen, int statuslen, int kf);
281 281 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
282 282
283 283 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
284 284 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
285 285
286 286 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
287 287 int kmflags);
288 288 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
289 289
290 290 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
291 291 mptsas_cmd_t *cmd);
292 292 static void mptsas_check_task_mgt(mptsas_t *mpt,
293 293 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
294 294 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
295 295 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
296 296 int *resid);
297 297
298 298 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
299 299 static void mptsas_free_active_slots(mptsas_t *mpt);
300 300 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
301 301
302 302 static void mptsas_restart_hba(mptsas_t *mpt);
303 303 static void mptsas_restart_waitq(mptsas_t *mpt);
304 304
305 305 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
306 306 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
307 307 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
308 308
309 309 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
310 310 static void mptsas_doneq_empty(mptsas_t *mpt);
311 311 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
312 312
313 313 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
314 314 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
315 315 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
316 316 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
317 317
318 318
319 319 static void mptsas_start_watch_reset_delay();
320 320 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
321 321 static void mptsas_watch_reset_delay(void *arg);
322 322 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
323 323
324 324 /*
325 325 * helper functions
326 326 */
327 327 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
328 328
329 329 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
330 330 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
331 331 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
332 332 int lun);
333 333 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
334 334 int lun);
335 335 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
336 336 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
337 337
338 338 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
339 339 int *lun);
340 340 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
341 341
342 342 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
343 343 mptsas_phymask_t phymask, uint8_t phy);
344 344 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
345 345 mptsas_phymask_t phymask, uint64_t wwid);
346 346 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
347 347 mptsas_phymask_t phymask, uint64_t wwid);
348 348
349 349 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
350 350 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
351 351
352 352 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
353 353 uint16_t *handle, mptsas_target_t **pptgt);
354 354 static void mptsas_update_phymask(mptsas_t *mpt);
355 355
356 356 static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
357 357 uint32_t *status, uint8_t cmd);
358 358 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
359 359 mptsas_phymask_t *phymask);
360 360 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
361 361 mptsas_phymask_t phymask);
362 362 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt);
363 363
364 364
365 365 /*
366 366 * Enumeration / DR functions
367 367 */
368 368 static void mptsas_config_all(dev_info_t *pdip);
369 369 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
370 370 dev_info_t **lundip);
371 371 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
372 372 dev_info_t **lundip);
373 373
374 374 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
375 375 static int mptsas_offline_target(dev_info_t *pdip, char *name);
376 376
377 377 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
378 378 dev_info_t **dip);
379 379
380 380 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
381 381 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
382 382 dev_info_t **dip, mptsas_target_t *ptgt);
383 383
384 384 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
385 385 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
386 386
387 387 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
388 388 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
389 389 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
390 390 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
391 391 int lun);
392 392
393 393 static void mptsas_offline_missed_luns(dev_info_t *pdip,
394 394 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
395 395 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
396 396 mdi_pathinfo_t *rpip, uint_t flags);
397 397
398 398 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
399 399 dev_info_t **smp_dip);
400 400 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
401 401 uint_t flags);
402 402
403 403 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
404 404 int mode, int *rval);
405 405 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
406 406 int mode, int *rval);
407 407 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
408 408 int mode, int *rval);
409 409 static void mptsas_record_event(void *args);
410 410 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
411 411 int mode);
412 412
413 413 mptsas_target_t *mptsas_tgt_alloc(mptsas_t *, uint16_t, uint64_t,
414 414 uint32_t, mptsas_phymask_t, uint8_t);
415 415 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
416 416 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
417 417 dev_info_t **smp_dip);
418 418
419 419 /*
420 420 * Power management functions
421 421 */
422 422 static int mptsas_get_pci_cap(mptsas_t *mpt);
423 423 static int mptsas_init_pm(mptsas_t *mpt);
424 424
425 425 /*
426 426 * MPT MSI tunable:
427 427 *
428 428 * By default MSI is enabled on all supported platforms.
429 429 */
430 430 boolean_t mptsas_enable_msi = B_TRUE;
431 431 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
432 432
433 433 /*
434 434 * Global switch for use of MPI2.5 FAST PATH.
435 435 * We don't really know what FAST PATH actually does, so if it is suspected
436 436 * to cause problems it can be turned off by setting this variable to B_FALSE.
437 437 */
438 438 boolean_t mptsas_use_fastpath = B_TRUE;
439 439
440 440 static int mptsas_register_intrs(mptsas_t *);
441 441 static void mptsas_unregister_intrs(mptsas_t *);
442 442 static int mptsas_add_intrs(mptsas_t *, int);
443 443 static void mptsas_rem_intrs(mptsas_t *);
444 444
445 445 /*
446 446 * FMA Prototypes
447 447 */
448 448 static void mptsas_fm_init(mptsas_t *mpt);
449 449 static void mptsas_fm_fini(mptsas_t *mpt);
450 450 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
451 451
452 452 extern pri_t minclsyspri, maxclsyspri;
453 453
454 454 /*
455 455 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
456 456 * under this device that the paths to a physical device are created when
457 457 * MPxIO is used.
458 458 */
459 459 extern dev_info_t *scsi_vhci_dip;
460 460
461 461 /*
462 462 * Tunable timeout value for Inquiry VPD page 0x83
463 463 * By default the value is 30 seconds.
464 464 */
465 465 int mptsas_inq83_retry_timeout = 30;
466 466
/*
 * DMA attributes used to allocate memory for message frame storage, not
 * for data I/O DMA.  All message frames must be stored in the first 4G
 * of physical memory, hence the 32-bit address-high limit below.
 */
ddi_dma_attr_t mptsas_dma_attrs = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,		/* address low - should be 0 (longlong) */
	0xffffffffull,	/* address high - 32-bit max range */
	0x00ffffffull,	/* count max - max DMA object size */
	4,		/* allocation alignment requirements */
	0x78,		/* burstsizes - binary encoded values */
	1,		/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,		/* granularity - device transfer size */
	0		/* flags, set to 0 */
};
486 486
/*
 * DMA attributes for data I/O DMA memory allocation.  Identical to
 * mptsas_dma_attrs except that full 64-bit physical addresses are
 * supported.
 */
ddi_dma_attr_t mptsas_dma_attrs64 = {
	DMA_ATTR_V0,	/* attribute layout version */
	0x0ull,		/* address low - should be 0 (longlong) */
	0xffffffffffffffffull,	/* address high - 64-bit max */
	0x00ffffffull,	/* count max - max DMA object size */
	4,		/* allocation alignment requirements */
	0x78,		/* burstsizes - binary encoded values */
	1,		/* minxfer - gran. of DMA engine */
	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
	0xffffffffull,	/* max segment size (DMA boundary) */
	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
	512,		/* granularity - device transfer size */
	0		/* flags, set to 0 */
};
505 505
/*
 * Device register access attributes: little-endian structures with
 * strictly ordered accesses.
 */
ddi_device_acc_attr_t mptsas_dev_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
512 512
/*
 * Character/block driver entry points.  open/close are delegated to the
 * SCSA framework (scsi_hba_open/scsi_hba_close); the only entry point
 * implemented by the driver itself is ioctl.
 */
static struct cb_ops mptsas_cb_ops = {
	scsi_hba_open,		/* open */
	scsi_hba_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mptsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* streamtab */
	D_MP,			/* cb_flag */
	CB_REV,			/* rev */
	nodev,			/* aread */
	nodev			/* awrite */
};
533 533
/*
 * Device driver operations.  On sparc the legacy reset entry point is
 * used; elsewhere quiesce (for fast reboot) is provided instead.
 */
static struct dev_ops mptsas_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	ddi_no_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mptsas_attach,		/* attach */
	mptsas_detach,		/* detach */
#ifdef	__sparc
	mptsas_reset,
#else
	nodev,			/* reset */
#endif  /* __sparc */
	&mptsas_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	mptsas_power,		/* power management */
#ifdef	__sparc
	ddi_quiesce_not_needed
#else
	mptsas_quiesce		/* quiesce */
#endif	/* __sparc */
};
556 556
557 557
#define	MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"

/* Loadable driver module descriptor. */
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. This one is a driver */
	MPTSAS_MOD_STRING, /* Name of the module. */
	&mptsas_ops,	/* driver ops */
};

/* Module linkage: a single driver module, no other linkage structs. */
static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
569 569 #define TARGET_PROP "target"
570 570 #define LUN_PROP "lun"
571 571 #define LUN64_PROP "lun64"
572 572 #define SAS_PROP "sas-mpt"
573 573 #define MDI_GUID "wwn"
574 574 #define NDI_GUID "guid"
575 575 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
576 576
577 577 /*
578 578 * Local static data
579 579 */
580 580 #if defined(MPTSAS_DEBUG)
581 581 /*
582 582 * Flags to indicate which debug messages are to be printed and which go to the
583 583 * debug log ring buffer. Default is to not print anything, and to log
584 584 * everything except the watchsubr() output which normally happens every second.
585 585 */
586 586 uint32_t mptsas_debugprt_flags = 0x0;
587 587 uint32_t mptsas_debuglog_flags = ~(1U << 30);
588 588 #endif /* defined(MPTSAS_DEBUG) */
589 589 uint32_t mptsas_debug_resets = 0;
590 590
591 591 static kmutex_t mptsas_global_mutex;
592 592 static void *mptsas_state; /* soft state ptr */
593 593 static krwlock_t mptsas_global_rwlock;
594 594
595 595 static kmutex_t mptsas_log_mutex;
596 596 static char mptsas_log_buf[256];
597 597 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
598 598
599 599 static mptsas_t *mptsas_head, *mptsas_tail;
600 600 static clock_t mptsas_scsi_watchdog_tick;
601 601 static clock_t mptsas_tick;
602 602 static timeout_id_t mptsas_reset_watch;
603 603 static timeout_id_t mptsas_timeout_id;
604 604 static int mptsas_timeouts_enabled = 0;
605 605
606 606 /*
607 607 * Default length for extended auto request sense buffers.
608 608 * All sense buffers need to be under the same alloc because there
609 609 * is only one common top 32bits (of 64bits) address register.
610 610 * Most requests only require 32 bytes, but some request >256.
611 611 * We use rmalloc()/rmfree() on this additional memory to manage the
612 612 * "extended" requests.
613 613 */
614 614 int mptsas_extreq_sense_bufsize = 256*64;
615 615
616 616 /*
617 617 * We believe that all software resrictions of having to run with DMA
618 618 * attributes to limit allocation to the first 4G are removed.
619 619 * However, this flag remains to enable quick switchback should suspicious
620 620 * problems emerge.
621 621 * Note that scsi_alloc_consistent_buf() does still adhere to allocating
622 622 * 32 bit addressable memory, but we can cope if that is changed now.
623 623 */
624 624 int mptsas_use_64bit_msgaddr = 1;
625 625
626 626 /*
627 627 * warlock directives
628 628 */
629 629 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
630 630 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
631 631 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
632 632 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
633 633 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
634 634 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
635 635
636 636 /*
637 637 * SM - HBA statics
638 638 */
639 639 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
640 640
641 641 #ifdef MPTSAS_DEBUG
642 642 void debug_enter(char *);
643 643 #endif
644 644
645 645 /*
646 646 * Notes:
647 647 * - scsi_hba_init(9F) initializes SCSI HBA modules
648 648 * - must call scsi_hba_fini(9F) if modload() fails
649 649 */
650 650 int
651 651 _init(void)
652 652 {
653 653 int status;
654 654 /* CONSTCOND */
655 655 ASSERT(NO_COMPETING_THREADS);
656 656
657 657 NDBG0(("_init"));
658 658
659 659 status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
660 660 MPTSAS_INITIAL_SOFT_SPACE);
661 661 if (status != 0) {
662 662 return (status);
663 663 }
664 664
665 665 if ((status = scsi_hba_init(&modlinkage)) != 0) {
666 666 ddi_soft_state_fini(&mptsas_state);
667 667 return (status);
668 668 }
669 669
670 670 mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
671 671 rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
672 672 mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
673 673
674 674 if ((status = mod_install(&modlinkage)) != 0) {
675 675 mutex_destroy(&mptsas_log_mutex);
676 676 rw_destroy(&mptsas_global_rwlock);
677 677 mutex_destroy(&mptsas_global_mutex);
678 678 ddi_soft_state_fini(&mptsas_state);
679 679 scsi_hba_fini(&modlinkage);
680 680 }
681 681
682 682 return (status);
683 683 }
684 684
685 685 /*
686 686 * Notes:
687 687 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
688 688 */
689 689 int
690 690 _fini(void)
691 691 {
692 692 int status;
693 693 /* CONSTCOND */
694 694 ASSERT(NO_COMPETING_THREADS);
695 695
696 696 NDBG0(("_fini"));
697 697
698 698 if ((status = mod_remove(&modlinkage)) == 0) {
699 699 ddi_soft_state_fini(&mptsas_state);
700 700 scsi_hba_fini(&modlinkage);
701 701 mutex_destroy(&mptsas_global_mutex);
702 702 rw_destroy(&mptsas_global_rwlock);
703 703 mutex_destroy(&mptsas_log_mutex);
704 704 }
705 705 return (status);
706 706 }
707 707
708 708 /*
709 709 * The loadable-module _info(9E) entry point
710 710 */
711 711 int
712 712 _info(struct modinfo *modinfop)
713 713 {
714 714 /* CONSTCOND */
715 715 ASSERT(NO_COMPETING_THREADS);
716 716 NDBG0(("mptsas _info"));
717 717
718 718 return (mod_info(&modlinkage, modinfop));
719 719 }
720 720
721 721 static int
722 722 mptsas_target_eval_devhdl(const void *op, void *arg)
723 723 {
724 724 uint16_t dh = *(uint16_t *)arg;
725 725 const mptsas_target_t *tp = op;
726 726
727 727 return ((int)tp->m_devhdl - (int)dh);
728 728 }
729 729
730 730 static int
731 731 mptsas_target_eval_slot(const void *op, void *arg)
732 732 {
733 733 mptsas_led_control_t *lcp = arg;
734 734 const mptsas_target_t *tp = op;
735 735
736 736 if (tp->m_enclosure != lcp->Enclosure)
737 737 return ((int)tp->m_enclosure - (int)lcp->Enclosure);
738 738
739 739 return ((int)tp->m_slot_num - (int)lcp->Slot);
740 740 }
741 741
742 742 static int
743 743 mptsas_target_eval_nowwn(const void *op, void *arg)
744 744 {
745 745 uint8_t phy = *(uint8_t *)arg;
746 746 const mptsas_target_t *tp = op;
747 747
748 748 if (tp->m_addr.mta_wwn != 0)
749 749 return (-1);
750 750
751 751 return ((int)tp->m_phynum - (int)phy);
752 752 }
753 753
754 754 static int
755 755 mptsas_smp_eval_devhdl(const void *op, void *arg)
756 756 {
757 757 uint16_t dh = *(uint16_t *)arg;
758 758 const mptsas_smp_t *sp = op;
759 759
760 760 return ((int)sp->m_devhdl - (int)dh);
761 761 }
762 762
763 763 static uint64_t
764 764 mptsas_target_addr_hash(const void *tp)
765 765 {
766 766 const mptsas_target_addr_t *tap = tp;
767 767
768 768 return ((tap->mta_wwn & 0xffffffffffffULL) |
769 769 ((uint64_t)tap->mta_phymask << 48));
770 770 }
771 771
772 772 static int
773 773 mptsas_target_addr_cmp(const void *a, const void *b)
774 774 {
775 775 const mptsas_target_addr_t *aap = a;
776 776 const mptsas_target_addr_t *bap = b;
777 777
778 778 if (aap->mta_wwn < bap->mta_wwn)
779 779 return (-1);
780 780 if (aap->mta_wwn > bap->mta_wwn)
781 781 return (1);
782 782 return ((int)bap->mta_phymask - (int)aap->mta_phymask);
783 783 }
784 784
785 785 static void
786 786 mptsas_target_free(void *op)
787 787 {
788 788 kmem_free(op, sizeof (mptsas_target_t));
789 789 }
790 790
791 791 static void
792 792 mptsas_smp_free(void *op)
793 793 {
794 794 kmem_free(op, sizeof (mptsas_smp_t));
795 795 }
796 796
/*
 * Empty and destroy both per-instance refhash tables (SAS/SATA targets
 * and SMP expanders), then clear the pointers so later teardown paths
 * do not double-destroy.
 *
 * NOTE(review): each loop removes the element it is currently standing
 * on and then calls refhash_next() on it; this relies on refhash keeping
 * a removed-but-referenced element walkable until the cursor advances —
 * confirm against the refhash implementation before restructuring.
 */
static void
mptsas_destroy_hashes(mptsas_t *mpt)
{
	mptsas_target_t *tp;
	mptsas_smp_t *sp;

	for (tp = refhash_first(mpt->m_targets); tp != NULL;
	    tp = refhash_next(mpt->m_targets, tp)) {
		refhash_remove(mpt->m_targets, tp);
	}
	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
	    sp = refhash_next(mpt->m_smp_targets, sp)) {
		refhash_remove(mpt->m_smp_targets, sp);
	}
	refhash_destroy(mpt->m_targets);
	refhash_destroy(mpt->m_smp_targets);
	mpt->m_targets = NULL;
	mpt->m_smp_targets = NULL;
}
816 816
/*
 * attach(9E) handling for scsi-iport child nodes of the HBA.
 *
 * Resolves the phymask encoded in the iport's unit address, publishes
 * the SCSA port properties (initiator-port, num-phys, phymask,
 * dynamic-port, virtual-port, attached-port), creates per-phy kstats
 * and registers the iport with MPxIO (mdi).  Virtual ports ("v0") only
 * get the virtual-port property and return early.
 *
 * Returns DDI_SUCCESS when the iport is fully set up, DDI_FAILURE on
 * any property/page lookup failure.
 */
static int
mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	dev_info_t	*pdip;
	mptsas_t	*mpt;
	scsi_hba_tran_t	*hba_tran;
	char		*iport = NULL;
	char		phymask[MPTSAS_MAX_PHYS];
	mptsas_phymask_t phy_mask = 0;
	int		dynamic_port = 0;
	uint32_t	page_address;
	char		initiator_wwnstr[MPTSAS_WWN_STRLEN];
	int		rval = DDI_FAILURE;
	int		i = 0;
	uint8_t		numphys = 0;
	uint8_t		phy_id;
	uint8_t		phy_port = 0;
	uint16_t	attached_devhdl = 0;
	uint32_t	dev_info;
	uint64_t	attached_sas_wwn;
	uint16_t	dev_hdl;
	uint16_t	pdev_hdl;
	uint16_t	bay_num, enclosure, io_flags;
	char		attached_wwnstr[MPTSAS_WWN_STRLEN];

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		/*
		 * If this a scsi-iport node, nothing to do here.
		 */
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	pdip = ddi_get_parent(dip);

	/* The softstate hangs off the parent HBA node's tran vector. */
	if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
	    NULL) {
		cmn_err(CE_WARN, "Failed attach iport because fail to "
		    "get tran vector for the HBA node");
		return (DDI_FAILURE);
	}

	mpt = TRAN2MPT(hba_tran);
	ASSERT(mpt != NULL);
	if (mpt == NULL)
		return (DDI_FAILURE);

	if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
	    NULL) {
		mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
		    "get tran vector for the iport node");
		return (DDI_FAILURE);
	}

	/*
	 * Overwrite parent's tran_hba_private to iport's tran vector
	 */
	hba_tran->tran_hba_private = mpt;

	ddi_report_dev(dip);

	/*
	 * Get SAS address for initiator port according dev_handle
	 */
	iport = ddi_get_name_addr(dip);
	if (iport && strncmp(iport, "v0", 2) == 0) {
		/* Virtual port: publish virtual-port=1 and we are done. */
		if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
		    MPTSAS_VIRTUAL_PORT, 1) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	/*
	 * The iport unit address is the hex phymask; find the matching
	 * entry in m_phy_info[] under m_mutex.
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		bzero(phymask, sizeof (phymask));
		(void) sprintf(phymask,
		    "%x", mpt->m_phy_info[i].phy_mask);
		if (strcmp(phymask, iport) == 0) {
			break;
		}
	}

	if (i == MPTSAS_MAX_PHYS) {
		/*
		 * NOTE(review): the two string literals concatenate to
		 * "...because portseems not exist" — missing space.
		 */
		mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
		    "seems not exist", iport);
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}

	phy_mask = mpt->m_phy_info[i].phy_mask;

	if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
		dynamic_port = 1;
	else
		dynamic_port = 0;

	/*
	 * Update PHY info for smhba
	 */
	if (mptsas_smhba_phy_init(mpt)) {
		mutex_exit(&mpt->m_mutex);
		mptsas_log(mpt, CE_WARN, "mptsas phy update "
		    "failed");
		return (DDI_FAILURE);
	}

	mutex_exit(&mpt->m_mutex);

	/* Count the phys belonging to this port (population of phy_mask). */
	numphys = 0;
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		if ((phy_mask >> i) & 0x01) {
			numphys++;
		}
	}

	bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
	(void) sprintf(initiator_wwnstr, "w%016"PRIx64,
	    mpt->un.m_base_wwid);

	/* Publish SCSA port properties; remove partial values on failure. */
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
	    DDI_PROP_SUCCESS) {
		(void) ddi_prop_remove(DDI_DEV_T_NONE,
		    dip, SCSI_ADDR_PROP_INITIATOR_PORT);
		mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
		    "prop update failed");
		return (DDI_FAILURE);
	}
	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    MPTSAS_NUM_PHYS, numphys) !=
	    DDI_PROP_SUCCESS) {
		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
		return (DDI_FAILURE);
	}

	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "phymask", phy_mask) !=
	    DDI_PROP_SUCCESS) {
		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
		mptsas_log(mpt, CE_WARN, "mptsas phy mask "
		    "prop update failed");
		return (DDI_FAILURE);
	}

	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "dynamic-port", dynamic_port) !=
	    DDI_PROP_SUCCESS) {
		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
		mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
		    "prop update failed");
		return (DDI_FAILURE);
	}
	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
	    MPTSAS_VIRTUAL_PORT, 0) !=
	    DDI_PROP_SUCCESS) {
		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
		    MPTSAS_VIRTUAL_PORT);
		mptsas_log(mpt, CE_WARN, "mptsas virtual port "
		    "prop update failed");
		return (DDI_FAILURE);
	}
	mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
	    &attached_devhdl);

	/*
	 * Look up SAS Device Page 0 for the attached device to learn the
	 * remote port's SAS WWN for the attached-port property.
	 */
	mutex_enter(&mpt->m_mutex);
	page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
	    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
	rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
	    &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
	    &pdev_hdl, &bay_num, &enclosure, &io_flags);
	if (rval != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN,
		    "Failed to get device page0 for handle:%d",
		    attached_devhdl);
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}

	/* Record this port's phymask string as the smhba path. */
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		bzero(phymask, sizeof (phymask));
		(void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
		if (strcmp(phymask, iport) == 0) {
			(void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
			    "%x",
			    mpt->m_phy_info[i].phy_mask);
		}
	}
	mutex_exit(&mpt->m_mutex);

	bzero(attached_wwnstr, sizeof (attached_wwnstr));
	(void) sprintf(attached_wwnstr, "w%016"PRIx64,
	    attached_sas_wwn);
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
	    DDI_PROP_SUCCESS) {
		(void) ddi_prop_remove(DDI_DEV_T_NONE,
		    dip, SCSI_ADDR_PROP_ATTACHED_PORT);
		return (DDI_FAILURE);
	}

	/* Create kstats for each phy on this iport */

	mptsas_create_phy_stats(mpt, iport, dip);

	/*
	 * register sas hba iport with mdi (MPxIO/vhci)
	 */
	if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
	    dip, 0) == MDI_SUCCESS) {
		mpt->m_mpxio_enable = TRUE;
	}
	return (DDI_SUCCESS);
}
1045 1045
1046 1046 /*
1047 1047 * Notes:
1048 1048 * Set up all device state and allocate data structures,
1049 1049 * mutexes, condition variables, etc. for device operation.
1050 1050 * Add interrupts needed.
1051 1051 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1052 1052 */
static int
mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	mptsas_t		*mpt = NULL;
	int			instance, i, j;
	int			doneq_thread_num;
	/*
	 * Progress flags for the goto-fail cleanup path: each stage sets
	 * its flag only after the corresponding resource is live, and the
	 * fail: block tears down strictly in reverse order.
	 */
	char			intr_added = 0;
	char			map_setup = 0;
	char			config_setup = 0;
	char			hba_attach_setup = 0;
	char			smp_attach_setup = 0;
	char			mutex_init_done = 0;
	char			event_taskq_create = 0;
	char			reset_taskq_create = 0;
	char			dr_taskq_create = 0;
	char			doneq_thread_create = 0;
	char			added_watchdog = 0;
	scsi_hba_tran_t		*hba_tran;
	uint_t			mem_bar = MEM_SPACE;
	int			rval = DDI_FAILURE;

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	/* iport children are handled by their own attach routine. */
	if (scsi_hba_iport_unit_address(dip)) {
		return (mptsas_iport_attach(dip, cmd));
	}

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
			return (DDI_FAILURE);

		mpt = TRAN2MPT(hba_tran);

		if (!mpt) {
			return (DDI_FAILURE);
		}

		/*
		 * Reset hardware and softc to "no outstanding commands"
		 * Note that a check condition can result on first command
		 * to a target.
		 */
		mutex_enter(&mpt->m_mutex);

		/*
		 * raise power.
		 */
		if (mpt->m_options & MPTSAS_OPT_PM) {
			mutex_exit(&mpt->m_mutex);
			(void) pm_busy_component(dip, 0);
			rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
			if (rval == DDI_SUCCESS) {
				mutex_enter(&mpt->m_mutex);
			} else {
				/*
				 * The pm_raise_power() call above failed,
				 * and that can only occur if we were unable
				 * to reset the hardware. This is probably
				 * due to unhealty hardware, and because
				 * important filesystems(such as the root
				 * filesystem) could be on the attached disks,
				 * it would not be a good idea to continue,
				 * as we won't be entirely certain we are
				 * writing correct data. So we panic() here
				 * to not only prevent possible data corruption,
				 * but to give developers or end users a hope
				 * of identifying and correcting any problems.
				 */
				fm_panic("mptsas could not reset hardware "
				    "during resume");
			}
		}

		mpt->m_suspended = 0;

		/*
		 * Reinitialize ioc
		 */
		mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
		if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
			mutex_exit(&mpt->m_mutex);
			if (mpt->m_options & MPTSAS_OPT_PM) {
				(void) pm_idle_component(dip, 0);
			}
			fm_panic("mptsas init chip fail during resume");
		}
		/*
		 * mptsas_update_driver_data needs interrupts so enable them
		 * first.
		 */
		MPTSAS_ENABLE_INTR(mpt);
		mptsas_update_driver_data(mpt);

		/* start requests, if possible */
		mptsas_restart_hba(mpt);

		mutex_exit(&mpt->m_mutex);

		/*
		 * Restart watch thread
		 */
		mutex_enter(&mptsas_global_mutex);
		if (mptsas_timeout_id == 0) {
			mptsas_timeout_id = timeout(mptsas_watch, NULL,
			    mptsas_tick);
			mptsas_timeouts_enabled = 1;
		}
		mutex_exit(&mptsas_global_mutex);

		/* report idle status to pm framework */
		if (mpt->m_options & MPTSAS_OPT_PM) {
			(void) pm_idle_component(dip, 0);
		}

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);

	}

	instance = ddi_get_instance(dip);

	/*
	 * Allocate softc information.
	 */
	if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
		mptsas_log(NULL, CE_WARN,
		    "mptsas%d: cannot allocate soft state", instance);
		goto fail;
	}

	mpt = ddi_get_soft_state(mptsas_state, instance);

	if (mpt == NULL) {
		mptsas_log(NULL, CE_WARN,
		    "mptsas%d: cannot get soft state", instance);
		goto fail;
	}

	/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
	scsi_size_clean(dip);

	mpt->m_dip = dip;
	mpt->m_instance = instance;

	/* Make a per-instance copy of the structures */
	mpt->m_io_dma_attr = mptsas_dma_attrs64;
	if (mptsas_use_64bit_msgaddr) {
		mpt->m_msg_dma_attr = mptsas_dma_attrs64;
	} else {
		mpt->m_msg_dma_attr = mptsas_dma_attrs;
	}
	mpt->m_reg_acc_attr = mptsas_dev_attr;
	mpt->m_dev_acc_attr = mptsas_dev_attr;

	/*
	 * Size of individual request sense buffer
	 */
	mpt->m_req_sense_size = EXTCMDS_STATUS_SIZE;

	/*
	 * Initialize FMA
	 */
	mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);

	mptsas_fm_init(mpt);

	if (mptsas_alloc_handshake_msg(mpt,
	    sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
		goto fail;
	}

	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
		goto fail;
	}
	config_setup++;

	if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
	    0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "map setup failed");
		goto fail;
	}
	map_setup++;

	/*
	 * A taskq is created for dealing with the event handler
	 */
	if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
		goto fail;
	}
	event_taskq_create++;

	/*
	 * A taskq is created for dealing with dr events
	 */
	if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
	    "mptsas_dr_taskq",
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
		    "failed");
		goto fail;
	}
	dr_taskq_create++;

	/*
	 * A taskq is created for dealing with reset events
	 */
	if ((mpt->m_reset_taskq = ddi_taskq_create(dip,
	    "mptsas_reset_taskq",
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for reset "
		    "failed");
		goto fail;
	}
	reset_taskq_create++;

	/* Tunables for the completion (doneq) worker threads. */
	mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    0, "mptsas_doneq_thread_threshold_prop", 10);
	mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    0, "mptsas_doneq_length_threshold_prop", 8);
	mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    0, "mptsas_doneq_thread_n_prop", 8);

	if (mpt->m_doneq_thread_n) {
		cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
		mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);

		mutex_enter(&mpt->m_doneq_mutex);
		mpt->m_doneq_thread_id =
		    kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
		    * mpt->m_doneq_thread_n, KM_SLEEP);

		for (j = 0; j < mpt->m_doneq_thread_n; j++) {
			cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
			    CV_DRIVER, NULL);
			mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
			    MUTEX_DRIVER, NULL);
			mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
			mpt->m_doneq_thread_id[j].flag |=
			    MPTSAS_DONEQ_THREAD_ACTIVE;
			mpt->m_doneq_thread_id[j].arg.mpt = mpt;
			mpt->m_doneq_thread_id[j].arg.t = j;
			mpt->m_doneq_thread_id[j].threadp =
			    thread_create(NULL, 0, mptsas_doneq_thread,
			    &mpt->m_doneq_thread_id[j].arg,
			    0, &p0, TS_RUN, minclsyspri);
			mpt->m_doneq_thread_id[j].donetail =
			    &mpt->m_doneq_thread_id[j].doneq;
			mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
		}
		mutex_exit(&mpt->m_doneq_mutex);
		doneq_thread_create++;
	}

	/*
	 * Disable hardware interrupt since we're not ready to
	 * handle it yet.
	 */
	MPTSAS_DISABLE_INTR(mpt);
	if (mptsas_register_intrs(mpt) == FALSE)
		goto fail;
	intr_added++;

	/* Initialize mutex used in interrupt handler */
	mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(mpt->m_intr_pri));
	mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(mpt->m_intr_pri));
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
		    NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(mpt->m_intr_pri));
	}

	cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
	cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
	mutex_init_done++;

	mutex_enter(&mpt->m_mutex);
	/*
	 * Initialize power management component
	 */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (mptsas_init_pm(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
			    "failed");
			goto fail;
		}
	}

	/*
	 * Initialize chip using Message Unit Reset, if allowed
	 */
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
		mutex_exit(&mpt->m_mutex);
		mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
		goto fail;
	}

	mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_target_free, sizeof (mptsas_target_t),
	    offsetof(mptsas_target_t, m_link),
	    offsetof(mptsas_target_t, m_addr), KM_SLEEP);

	/*
	 * Fill in the phy_info structure and get the base WWID
	 */
	if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_get_manufacture_page5 failed!");
		goto fail;
	}

	if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_get_sas_io_unit_page_hndshk failed!");
		goto fail;
	}

	if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_get_manufacture_page0 failed!");
		goto fail;
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * Register the iport for multiple port HBA
	 */
	mptsas_iport_register(mpt);

	/*
	 * initialize SCSI HBA transport structure
	 */
	if (mptsas_hba_setup(mpt) == FALSE)
		goto fail;
	hba_attach_setup++;

	if (mptsas_smp_setup(mpt) == FALSE)
		goto fail;
	smp_attach_setup++;

	if (mptsas_cache_create(mpt) == FALSE)
		goto fail;

	mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
	if (mpt->m_scsi_reset_delay == 0) {
		mptsas_log(mpt, CE_NOTE,
		    "scsi_reset_delay of 0 is not recommended,"
		    " resetting to SCSI_DEFAULT_RESET_DELAY\n");
		mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
	}

	/*
	 * Initialize the wait and done FIFO queue
	 */
	mpt->m_donetail = &mpt->m_doneq;
	mpt->m_waitqtail = &mpt->m_waitq;
	mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
	mpt->m_tx_draining = 0;

	/*
	 * ioc cmd queue initialize
	 */
	mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
	mpt->m_dev_handle = 0xFFFF;

	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * enable event notification
	 */
	mutex_enter(&mpt->m_mutex);
	if (mptsas_ioc_enable_event_notification(mpt)) {
		mutex_exit(&mpt->m_mutex);
		goto fail;
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * used for mptsas_watch
	 */
	mptsas_list_add(mpt);

	/* First instance starts the global watchdog timeout. */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_timeouts_enabled == 0) {
		mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
		    dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);

		mptsas_tick = mptsas_scsi_watchdog_tick *
		    drv_usectohz((clock_t)1000000);

		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
		mptsas_timeouts_enabled = 1;
	}
	mutex_exit(&mptsas_global_mutex);
	added_watchdog++;

	/*
	 * Initialize PHY info for smhba.
	 * This requires watchdog to be enabled otherwise if interrupts
	 * don't work the system will hang.
	 */
	if (mptsas_smhba_setup(mpt)) {
		mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
		    "failed");
		goto fail;
	}

	/* Check all dma handles allocated in attach */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
	    != DDI_SUCCESS)) {
		goto fail;
	}

	/* Check all acc handles allocated in attach */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
	    != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle)
	    != DDI_SUCCESS)) {
		goto fail;
	}

	/*
	 * After this point, we are not going to fail the attach.
	 */

	/* Print message of HBA present */
	ddi_report_dev(dip);

	/* report idle status to pm framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_idle_component(dip, 0);
	}

	return (DDI_SUCCESS);

fail:
	mptsas_log(mpt, CE_WARN, "attach failed");
	mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
	ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	if (mpt) {
		/* deallocate in reverse order */
		if (added_watchdog) {
			mptsas_list_del(mpt);
			mutex_enter(&mptsas_global_mutex);

			/* Last instance out stops the global watchdog. */
			if (mptsas_timeout_id && (mptsas_head == NULL)) {
				timeout_id_t tid = mptsas_timeout_id;
				mptsas_timeouts_enabled = 0;
				mptsas_timeout_id = 0;
				mutex_exit(&mptsas_global_mutex);
				(void) untimeout(tid);
				mutex_enter(&mptsas_global_mutex);
			}
			mutex_exit(&mptsas_global_mutex);
		}

		mptsas_cache_destroy(mpt);

		if (smp_attach_setup) {
			mptsas_smp_teardown(mpt);
		}
		if (hba_attach_setup) {
			mptsas_hba_teardown(mpt);
		}

		if (mpt->m_targets)
			refhash_destroy(mpt->m_targets);
		if (mpt->m_smp_targets)
			refhash_destroy(mpt->m_smp_targets);

		if (mpt->m_active) {
			mptsas_free_active_slots(mpt);
		}
		if (intr_added) {
			mptsas_unregister_intrs(mpt);
		}

		if (doneq_thread_create) {
			/* Ask each worker to exit, then wait for the count. */
			mutex_enter(&mpt->m_doneq_mutex);
			doneq_thread_num = mpt->m_doneq_thread_n;
			for (j = 0; j < mpt->m_doneq_thread_n; j++) {
				mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
				mpt->m_doneq_thread_id[j].flag &=
				    (~MPTSAS_DONEQ_THREAD_ACTIVE);
				cv_signal(&mpt->m_doneq_thread_id[j].cv);
				mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
			}
			while (mpt->m_doneq_thread_n) {
				cv_wait(&mpt->m_doneq_thread_cv,
				    &mpt->m_doneq_mutex);
			}
			for (j = 0; j < doneq_thread_num; j++) {
				cv_destroy(&mpt->m_doneq_thread_id[j].cv);
				mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
			}
			kmem_free(mpt->m_doneq_thread_id,
			    sizeof (mptsas_doneq_thread_list_t)
			    * doneq_thread_num);
			mutex_exit(&mpt->m_doneq_mutex);
			cv_destroy(&mpt->m_doneq_thread_cv);
			mutex_destroy(&mpt->m_doneq_mutex);
		}
		if (event_taskq_create) {
			ddi_taskq_destroy(mpt->m_event_taskq);
		}
		if (dr_taskq_create) {
			ddi_taskq_destroy(mpt->m_dr_taskq);
		}
		if (reset_taskq_create) {
			ddi_taskq_destroy(mpt->m_reset_taskq);
		}
		if (mutex_init_done) {
			mutex_destroy(&mpt->m_tx_waitq_mutex);
			mutex_destroy(&mpt->m_passthru_mutex);
			mutex_destroy(&mpt->m_mutex);
			for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
				mutex_destroy(
				    &mpt->m_phy_info[i].smhba_info.phy_mutex);
			}
			cv_destroy(&mpt->m_cv);
			cv_destroy(&mpt->m_passthru_cv);
			cv_destroy(&mpt->m_fw_cv);
			cv_destroy(&mpt->m_config_cv);
			cv_destroy(&mpt->m_fw_diag_cv);
		}

		if (map_setup) {
			mptsas_cfg_fini(mpt);
		}
		if (config_setup) {
			mptsas_config_space_fini(mpt);
		}
		mptsas_free_handshake_msg(mpt);
		mptsas_hba_fini(mpt);

		mptsas_fm_fini(mpt);
		ddi_soft_state_free(mptsas_state, instance);
		ddi_prop_remove_all(dip);
	}
	return (DDI_FAILURE);
}
1626 1642
/*
 * DDI suspend handling for the HBA node.  Marks the instance suspended,
 * cancels per-instance timeouts, stops the global watch/reset timeouts
 * when every instance is suspended, disables interrupts, syncs IR
 * metadata via a RAID-action system shutdown, and drains the taskqs.
 * Iport nodes and already-suspended instances return immediately.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* Nested suspend: just bump the count and return. */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt
	 */
	/*
	 * m_mutex is dropped around each untimeout() because the timeout
	 * handler may itself need m_mutex (classic deadlock avoidance).
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	if (g == NULL) {
		/* No instance still running: stop the global timeouts. */
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_reset_taskq);
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1733 1750
#ifdef __sparc
/*
 * reset(9E) entry point (sparc).  Quiet the HBA at system reset time:
 * disable interrupts in hardware and sync Integrated RAID (IR) metadata
 * via a RAID action system shutdown.  Always returns DDI_SUCCESS, even
 * when there is nothing to do (iport node, or no soft state yet).
 */
/*ARGSUSED*/
static int
mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t *tran;

	/*
	 * If this call is for iport, just return.
	 */
	if (scsi_hba_iport_unit_address(devi))
		return (DDI_SUCCESS);

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	if ((mpt = TRAN2MPT(tran)) == NULL)
		return (DDI_SUCCESS);

	/*
	 * Send RAID action system shutdown to sync IR. Disable HBA
	 * interrupts in hardware first.
	 */
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);

	return (DDI_SUCCESS);
}
#else /* __sparc */
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
mptsas_quiesce(dev_info_t *devi)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t *tran;

	/*
	 * If this call is for iport, just return.
	 */
	if (scsi_hba_iport_unit_address(devi))
		return (DDI_SUCCESS);

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	if ((mpt = TRAN2MPT(tran)) == NULL)
		return (DDI_SUCCESS);

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/* Send RAID action system shutdown to sync IR */
	mptsas_raid_action_system_shutdown(mpt);

	return (DDI_SUCCESS);
}
#endif /* __sparc */
1800 1817
1801 1818 /*
1802 1819 * detach(9E). Remove all device allocations and system resources;
1803 1820 * disable device interrupts.
1804 1821 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1805 1822 */
1806 1823 static int
1807 1824 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1808 1825 {
1809 1826 /* CONSTCOND */
1810 1827 ASSERT(NO_COMPETING_THREADS);
1811 1828 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1812 1829
1813 1830 switch (cmd) {
1814 1831 case DDI_DETACH:
1815 1832 return (mptsas_do_detach(devi));
1816 1833
1817 1834 case DDI_SUSPEND:
1818 1835 return (mptsas_suspend(devi));
1819 1836
1820 1837 default:
1821 1838 return (DDI_FAILURE);
1822 1839 }
1823 1840 /* NOTREACHED */
1824 1841 }
1825 1842
/*
 * Do the real work of DDI_DETACH.  For an iport node this only frees any
 * MPxIO pathinfo children and unregisters the pHCI.  For the HBA node it
 * tears the instance down in roughly the reverse order of attach: quiesce
 * the IOC, destroy taskqs and doneq worker threads, cancel per-instance
 * and (if this was the last instance) global timeouts, then free caches,
 * transport registrations, locks, config space and FMA state.
 * Returns DDI_SUCCESS, or DDI_FAILURE if pathinfo children remain busy
 * or the device cannot be powered up for register access.
 */
static int
mptsas_do_detach(dev_info_t *dip)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t	*tran;
	int		circ = 0;
	int		circ1 = 0;
	mdi_pathinfo_t	*pip = NULL;
	int		i;
	int		doneq_thread_num = 0;

	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));

	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
		return (DDI_FAILURE);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Still have pathinfo child, should not detach mpt driver
	 */
	if (scsi_hba_iport_unit_address(dip)) {
		if (mpt->m_mpxio_enable) {
			/*
			 * MPxIO enabled for the iport
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ1);
			ndi_devi_enter(dip, &circ);
			/* Free every client path; fail if any is busy. */
			while (pip = mdi_get_next_client_path(dip, NULL)) {
				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
					continue;
				}
				ndi_devi_exit(dip, circ);
				ndi_devi_exit(scsi_vhci_dip, circ1);
				NDBG12(("detach failed because of "
				    "outstanding path info"));
				return (DDI_FAILURE);
			}
			ndi_devi_exit(dip, circ);
			ndi_devi_exit(scsi_vhci_dip, circ1);
			(void) mdi_phci_unregister(dip, 0);
		}

		ddi_prop_remove_all(dip);

		return (DDI_SUCCESS);
	}

	/* Make sure power level is D0 before accessing registers */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		(void) pm_busy_component(dip, 0);
		if (mpt->m_power_level != PM_LEVEL_D0) {
			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
			    DDI_SUCCESS) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas%d: Raise power request failed.",
				    mpt->m_instance);
				(void) pm_idle_component(dip, 0);
				return (DDI_FAILURE);
			}
		}
	}

	/*
	 * Send RAID action system shutdown to sync IR. After action, send a
	 * Message Unit Reset. Since after that DMA resource will be freed,
	 * set ioc to READY state will avoid HBA initiated DMA operation.
	 */
	mutex_enter(&mpt->m_mutex);
	MPTSAS_DISABLE_INTR(mpt);
	mptsas_raid_action_system_shutdown(mpt);
	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
	(void) mptsas_ioc_reset(mpt, FALSE);
	mutex_exit(&mpt->m_mutex);
	mptsas_rem_intrs(mpt);
	/* Interrupts are gone; the taskqs can no longer be fed. */
	ddi_taskq_destroy(mpt->m_reset_taskq);
	ddi_taskq_destroy(mpt->m_event_taskq);
	ddi_taskq_destroy(mpt->m_dr_taskq);

	/*
	 * Ask each completion (doneq) worker thread to exit, wait until
	 * they have all checked out, then free their per-thread state.
	 */
	if (mpt->m_doneq_thread_n) {
		mutex_enter(&mpt->m_doneq_mutex);
		doneq_thread_num = mpt->m_doneq_thread_n;
		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
			mpt->m_doneq_thread_id[i].flag &=
			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
			cv_signal(&mpt->m_doneq_thread_id[i].cv);
			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
		}
		/* Each exiting thread decrements m_doneq_thread_n. */
		while (mpt->m_doneq_thread_n) {
			cv_wait(&mpt->m_doneq_thread_cv,
			    &mpt->m_doneq_mutex);
		}
		for (i = 0; i < doneq_thread_num; i++) {
			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
		}
		kmem_free(mpt->m_doneq_thread_id,
		    sizeof (mptsas_doneq_thread_list_t)
		    * doneq_thread_num);
		mutex_exit(&mpt->m_doneq_mutex);
		cv_destroy(&mpt->m_doneq_thread_cv);
		mutex_destroy(&mpt->m_doneq_mutex);
	}

	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);

	mptsas_list_del(mpt);

	/*
	 * Cancel timeout threads for this mpt
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		/* Drop m_mutex so a firing timeout cannot deadlock us. */
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	/*
	 * last mpt? ... if active, CANCEL watch threads.
	 */
	mutex_enter(&mptsas_global_mutex);
	if (mptsas_head == NULL) {
		timeout_id_t tid;
		/*
		 * Clear mptsas_timeouts_enable so that the watch thread
		 * gets restarted on DDI_ATTACH
		 */
		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	/*
	 * Delete Phy stats
	 */
	mptsas_destroy_phy_stats(mpt);

	mptsas_destroy_hashes(mpt);

	/*
	 * Delete nt_active.
	 */
	mutex_enter(&mpt->m_mutex);
	mptsas_free_active_slots(mpt);
	mutex_exit(&mpt->m_mutex);

	/* deallocate everything that was allocated in mptsas_attach */
	mptsas_cache_destroy(mpt);

	mptsas_hba_fini(mpt);
	mptsas_cfg_fini(mpt);

	/* Lower the power informing PM Framework */
	if (mpt->m_options & MPTSAS_OPT_PM) {
		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
			mptsas_log(mpt, CE_WARN,
			    "!mptsas%d: Lower power request failed "
			    "during detach, ignoring.",
			    mpt->m_instance);
	}

	mutex_destroy(&mpt->m_tx_waitq_mutex);
	mutex_destroy(&mpt->m_passthru_mutex);
	mutex_destroy(&mpt->m_mutex);
	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
	}
	cv_destroy(&mpt->m_cv);
	cv_destroy(&mpt->m_passthru_cv);
	cv_destroy(&mpt->m_fw_cv);
	cv_destroy(&mpt->m_config_cv);
	cv_destroy(&mpt->m_fw_diag_cv);


	mptsas_smp_teardown(mpt);
	mptsas_hba_teardown(mpt);

	mptsas_config_space_fini(mpt);

	mptsas_free_handshake_msg(mpt);

	mptsas_fm_fini(mpt);
	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
2041 2059
2042 2060 static void
2043 2061 mptsas_list_add(mptsas_t *mpt)
2044 2062 {
2045 2063 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2046 2064
2047 2065 if (mptsas_head == NULL) {
2048 2066 mptsas_head = mpt;
2049 2067 } else {
2050 2068 mptsas_tail->m_next = mpt;
2051 2069 }
2052 2070 mptsas_tail = mpt;
2053 2071 rw_exit(&mptsas_global_rwlock);
2054 2072 }
2055 2073
2056 2074 static void
2057 2075 mptsas_list_del(mptsas_t *mpt)
2058 2076 {
2059 2077 mptsas_t *m;
2060 2078 /*
2061 2079 * Remove device instance from the global linked list
2062 2080 */
2063 2081 rw_enter(&mptsas_global_rwlock, RW_WRITER);
2064 2082 if (mptsas_head == mpt) {
2065 2083 m = mptsas_head = mpt->m_next;
2066 2084 } else {
2067 2085 for (m = mptsas_head; m != NULL; m = m->m_next) {
2068 2086 if (m->m_next == mpt) {
2069 2087 m->m_next = mpt->m_next;
2070 2088 break;
2071 2089 }
2072 2090 }
2073 2091 if (m == NULL) {
2074 2092 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
2075 2093 }
2076 2094 }
2077 2095
2078 2096 if (mptsas_tail == mpt) {
2079 2097 mptsas_tail = m;
2080 2098 }
2081 2099 rw_exit(&mptsas_global_rwlock);
2082 2100 }
2083 2101
2084 2102 static int
2085 2103 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2086 2104 {
2087 2105 ddi_dma_attr_t task_dma_attrs;
2088 2106
2089 2107 mpt->m_hshk_dma_size = 0;
2090 2108 task_dma_attrs = mpt->m_msg_dma_attr;
2091 2109 task_dma_attrs.dma_attr_sgllen = 1;
2092 2110 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
2093 2111
2094 2112 /* allocate Task Management ddi_dma resources */
2095 2113 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
2096 2114 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
2097 2115 alloc_size, NULL) == FALSE) {
2098 2116 return (DDI_FAILURE);
2099 2117 }
2100 2118 mpt->m_hshk_dma_size = alloc_size;
2101 2119
2102 2120 return (DDI_SUCCESS);
2103 2121 }
2104 2122
2105 2123 static void
2106 2124 mptsas_free_handshake_msg(mptsas_t *mpt)
2107 2125 {
2108 2126 if (mpt->m_hshk_dma_size == 0)
2109 2127 return;
2110 2128 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
2111 2129 mpt->m_hshk_dma_size = 0;
2112 2130 }
2113 2131
/*
 * Allocate and fill in the scsi_hba_tran(9S) vector for this instance
 * and attach it to the SCSA framework.  Returns TRUE on success; on
 * failure the tran structure is freed, m_tran cleared, and FALSE
 * returned.
 */
static int
mptsas_hba_setup(mptsas_t *mpt)
{
	scsi_hba_tran_t		*hba_tran;
	int			tran_flags;

	/* Allocate a transport structure */
	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
	    SCSI_HBA_CANSLEEP);
	ASSERT(mpt->m_tran != NULL);

	hba_tran->tran_hba_private	= mpt;
	hba_tran->tran_tgt_private	= NULL;

	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;

	hba_tran->tran_start		= mptsas_scsi_start;
	hba_tran->tran_reset		= mptsas_scsi_reset;
	hba_tran->tran_abort		= mptsas_scsi_abort;
	hba_tran->tran_getcap		= mptsas_scsi_getcap;
	hba_tran->tran_setcap		= mptsas_scsi_setcap;
	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;

	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;

	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
	hba_tran->tran_get_name		= mptsas_get_name;

	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
	hba_tran->tran_bus_reset	= NULL;

	/* No async event support; these entry points stay unset. */
	hba_tran->tran_add_eventcall	= NULL;
	hba_tran->tran_get_eventcookie	= NULL;
	hba_tran->tran_post_event	= NULL;
	hba_tran->tran_remove_eventcall	= NULL;

	hba_tran->tran_bus_config	= mptsas_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	/*
	 * All children of the HBA are iports. We need tran was cloned.
	 * So we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
	 * inherited to iport's tran vector.
	 */
	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);

	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
	    hba_tran, tran_flags) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
		scsi_hba_tran_free(hba_tran);
		mpt->m_tran = NULL;
		return (FALSE);
	}
	return (TRUE);
}
2175 2193
2176 2194 static void
2177 2195 mptsas_hba_teardown(mptsas_t *mpt)
2178 2196 {
2179 2197 (void) scsi_hba_detach(mpt->m_dip);
2180 2198 if (mpt->m_tran != NULL) {
2181 2199 scsi_hba_tran_free(mpt->m_tran);
2182 2200 mpt->m_tran = NULL;
2183 2201 }
2184 2202 }
2185 2203
/*
 * Register one SCSA iport for each distinct controller port, plus a
 * virtual iport ("v0") that is always created for RAID volumes.
 *
 * PHYs that share the same firmware port number are grouped into a
 * phymask (one bit per PHY); the iport's unit address is that mask in
 * hex.  "mask" accumulates all PHYs already assigned to some iport so
 * each port is registered only once.
 */
static void
mptsas_iport_register(mptsas_t *mpt)
{
	int		i, j;
	mptsas_phymask_t	mask = 0x0;
	/*
	 * initial value of mask is 0
	 */
	mutex_enter(&mpt->m_mutex);
	for (i = 0; i < mpt->m_num_phys; i++) {
		mptsas_phymask_t phy_mask = 0x0;
		char phy_mask_name[MPTSAS_MAX_PHYS];
		uint8_t current_port;

		/* Skip PHYs with nothing attached. */
		if (mpt->m_phy_info[i].attached_devhdl == 0)
			continue;

		bzero(phy_mask_name, sizeof (phy_mask_name));

		current_port = mpt->m_phy_info[i].port_num;

		/* Already covered by a previously registered iport. */
		if ((mask & (1 << i)) != 0)
			continue;

		/* Collect every attached PHY belonging to this port. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if (mpt->m_phy_info[j].attached_devhdl &&
			    (mpt->m_phy_info[j].port_num == current_port)) {
				phy_mask |= (1 << j);
			}
		}
		mask = mask | phy_mask;

		/* Record the port's phymask in each member PHY. */
		for (j = 0; j < mpt->m_num_phys; j++) {
			if ((phy_mask >> j) & 0x01) {
				mpt->m_phy_info[j].phy_mask = phy_mask;
			}
		}

		(void) sprintf(phy_mask_name, "%x", phy_mask);

		/*
		 * Drop m_mutex around the registration call, which may
		 * block; m_phy_info may therefore change between loop
		 * iterations -- NOTE(review): existing behavior, the
		 * loop index/mask are not revalidated after reacquire.
		 */
		mutex_exit(&mpt->m_mutex);
		/*
		 * register a iport
		 */
		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
		mutex_enter(&mpt->m_mutex);
	}
	mutex_exit(&mpt->m_mutex);
	/*
	 * register a virtual port for RAID volume always
	 */
	(void) scsi_hba_iport_register(mpt->m_dip, "v0");

}
2240 2258
/*
 * Set up the SMP (Serial Management Protocol) transport for this
 * instance and create the refhash used to track SMP targets
 * (expanders), keyed by target address.  Returns TRUE on success,
 * FALSE if SCSA SMP attach fails (the smp_hba_tran is freed then).
 */
static int
mptsas_smp_setup(mptsas_t *mpt)
{
	mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
	ASSERT(mpt->m_smptran != NULL);
	mpt->m_smptran->smp_tran_hba_private = mpt;
	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
	if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
		smp_hba_tran_free(mpt->m_smptran);
		mpt->m_smptran = NULL;
		return (FALSE);
	}
	/*
	 * Initialize smp hash table
	 */
	mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
	    mptsas_smp_free, sizeof (mptsas_smp_t),
	    offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
	    KM_SLEEP);
	/* 0xFFFF appears to be the "invalid handle" sentinel -- verify. */
	mpt->m_smp_devhdl = 0xFFFF;

	return (TRUE);
}
2266 2284
2267 2285 static void
2268 2286 mptsas_smp_teardown(mptsas_t *mpt)
2269 2287 {
2270 2288 (void) smp_hba_detach(mpt->m_dip);
2271 2289 if (mpt->m_smptran != NULL) {
2272 2290 smp_hba_tran_free(mpt->m_smptran);
2273 2291 mpt->m_smptran = NULL;
2274 2292 }
2275 2293 mpt->m_smp_devhdl = 0;
2276 2294 }
2277 2295
/*
 * Create the per-instance kmem caches: one for commands (mptsas_cmd
 * plus the scsi_pkt allocation) and one for extra SGL frames used when
 * a command's scatter/gather list does not fit in the main request
 * frame.  Returns TRUE on success, FALSE if either cache cannot be
 * created (caller is expected to clean up via mptsas_cache_destroy).
 */
static int
mptsas_cache_create(mptsas_t *mpt)
{
	int instance = mpt->m_instance;
	char buf[64];

	/*
	 * create kmem cache for packets
	 */
	(void) sprintf(buf, "mptsas%d_cache", instance);
	mpt->m_kmem_cache = kmem_cache_create(buf,
	    sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
	    mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
	    NULL, (void *)mpt, NULL, 0);

	if (mpt->m_kmem_cache == NULL) {
		mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
		return (FALSE);
	}

	/*
	 * create kmem cache for extra SGL frames if SGL cannot
	 * be accomodated into main request frame.
	 */
	(void) sprintf(buf, "mptsas%d_cache_frames", instance);
	mpt->m_cache_frames = kmem_cache_create(buf,
	    sizeof (mptsas_cache_frames_t), 8,
	    mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
	    NULL, (void *)mpt, NULL, 0);

	if (mpt->m_cache_frames == NULL) {
		mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
		return (FALSE);
	}

	return (TRUE);
}
2315 2333
2316 2334 static void
2317 2335 mptsas_cache_destroy(mptsas_t *mpt)
2318 2336 {
2319 2337 /* deallocate in reverse order */
2320 2338 if (mpt->m_cache_frames) {
2321 2339 kmem_cache_destroy(mpt->m_cache_frames);
2322 2340 mpt->m_cache_frames = NULL;
2323 2341 }
2324 2342 if (mpt->m_kmem_cache) {
2325 2343 kmem_cache_destroy(mpt->m_kmem_cache);
2326 2344 mpt->m_kmem_cache = NULL;
2327 2345 }
2328 2346 }
2329 2347
/*
 * power(9E) entry point.  Raise or lower the power level of the HBA
 * (component is unused).  For PM_LEVEL_D0 we power the chip on, poll up
 * to 30 seconds (3000 x 10ms) for the IOC to leave the RESET state, and
 * hard-reset it if it still is not OPERATIONAL.  Lowering power is
 * refused while the device is marked busy.
 */
static int
mptsas_power(dev_info_t *dip, int component, int level)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(component))
#endif
	mptsas_t	*mpt;
	int		rval = DDI_SUCCESS;
	int		polls = 0;
	uint32_t	ioc_status;

	/* Power management applies to the HBA node only, not iports. */
	if (scsi_hba_iport_unit_address(dip) != 0)
		return (DDI_SUCCESS);

	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
	if (mpt == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&mpt->m_mutex);

	/*
	 * If the device is busy, don't lower its power level
	 */
	if (mpt->m_busy && (mpt->m_power_level > level)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_FAILURE);
	}
	switch (level) {
	case PM_LEVEL_D0:
		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
		MPTSAS_POWER_ON(mpt);
		/*
		 * Wait up to 30 seconds for IOC to come out of reset.
		 */
		while (((ioc_status = ddi_get32(mpt->m_datap,
		    &mpt->m_reg->Doorbell)) &
		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
			if (polls++ > 3000) {
				break;
			}
			delay(drv_usectohz(10000));
		}
		/*
		 * If IOC is not in operational state, try to hard reset it.
		 */
		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL) {
			/* Force a full hard reset, not a message unit reset */
			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN,
				    "mptsas_power: hard reset failed");
				mutex_exit(&mpt->m_mutex);
				return (DDI_FAILURE);
			}
		}
		mpt->m_power_level = PM_LEVEL_D0;
		break;
	case PM_LEVEL_D3:
		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
		MPTSAS_POWER_OFF(mpt);
		break;
	default:
		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
		    mpt->m_instance, level);
		rval = DDI_FAILURE;
		break;
	}
	mutex_exit(&mpt->m_mutex);
	return (rval);
}
2401 2419
/*
 * Initialize configuration space and figure out which
 * chip and revision of the chip the mpt driver is using.
 * Idempotent: returns TRUE immediately if config space is already
 * mapped.  Returns FALSE only if pci_config_setup() fails.
 */
static int
mptsas_config_space_init(mptsas_t *mpt)
{
	NDBG0(("mptsas_config_space_init"));

	if (mpt->m_config_handle != NULL)
		return (TRUE);

	if (pci_config_setup(mpt->m_dip,
	    &mpt->m_config_handle) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
		return (FALSE);
	}

	/*
	 * This is a workaround for a XMITS ASIC bug which does not
	 * drive the CBE upper bits.
	 */
	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
	    PCI_STAT_PERROR) {
		/* Write-one-to-clear the parity error status bit. */
		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
		    PCI_STAT_PERROR);
	}

	mptsas_setup_cmd_reg(mpt);

	/*
	 * Get the chip device id:
	 */
	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);

	/*
	 * Save the revision.
	 */
	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);

	/*
	 * Save the SubSystem Vendor and Device IDs
	 */
	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);

	/*
	 * Set the latency timer to 0x40 as specified by the upa -> pci
	 * bridge chip design team. This may be done by the sparc pci
	 * bus nexus driver, but the driver should make sure the latency
	 * timer is correct for performance reasons.
	 */
	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
	    MPTSAS_LATENCY_TIMER);

	(void) mptsas_get_pci_cap(mpt);
	return (TRUE);
}
2460 2478
2461 2479 static void
2462 2480 mptsas_config_space_fini(mptsas_t *mpt)
2463 2481 {
2464 2482 if (mpt->m_config_handle != NULL) {
2465 2483 mptsas_disable_bus_master(mpt);
2466 2484 pci_config_teardown(&mpt->m_config_handle);
2467 2485 mpt->m_config_handle = NULL;
2468 2486 }
2469 2487 }
2470 2488
2471 2489 static void
2472 2490 mptsas_setup_cmd_reg(mptsas_t *mpt)
2473 2491 {
2474 2492 ushort_t cmdreg;
2475 2493
2476 2494 /*
2477 2495 * Set the command register to the needed values.
2478 2496 */
2479 2497 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2480 2498 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2481 2499 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2482 2500 cmdreg &= ~PCI_COMM_IO;
2483 2501 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2484 2502 }
2485 2503
2486 2504 static void
2487 2505 mptsas_disable_bus_master(mptsas_t *mpt)
2488 2506 {
2489 2507 ushort_t cmdreg;
2490 2508
2491 2509 /*
2492 2510 * Clear the master enable bit in the PCI command register.
2493 2511 * This prevents any bus mastering activity like DMA.
2494 2512 */
2495 2513 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2496 2514 cmdreg &= ~PCI_COMM_ME;
2497 2515 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2498 2516 }
2499 2517
2500 2518 int
2501 2519 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2502 2520 {
2503 2521 ddi_dma_attr_t attrs;
2504 2522
2505 2523 attrs = mpt->m_io_dma_attr;
2506 2524 attrs.dma_attr_sgllen = 1;
2507 2525
2508 2526 ASSERT(dma_statep != NULL);
2509 2527
2510 2528 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2511 2529 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2512 2530 &dma_statep->cookie) == FALSE) {
2513 2531 return (DDI_FAILURE);
2514 2532 }
2515 2533
2516 2534 return (DDI_SUCCESS);
2517 2535 }
2518 2536
2519 2537 void
2520 2538 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2521 2539 {
2522 2540 ASSERT(dma_statep != NULL);
2523 2541 mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2524 2542 dma_statep->size = 0;
2525 2543 }
2526 2544
/*
 * Allocate a temporary single-cookie DMA buffer of "size" bytes, invoke
 * "callback" with the mapped memory (passing through "var"), then check
 * both the DMA and access handles for faults before freeing the buffer.
 * Returns the callback's result, or DDI_FAILURE on allocation failure or
 * a detected handle fault.  Caller must hold m_mutex.
 */
int
mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
{
	ddi_dma_attr_t	attrs;
	ddi_dma_handle_t	dma_handle;
	caddr_t		memp;
	ddi_acc_handle_t	accessp;
	int		rval;

	ASSERT(mutex_owned(&mpt->m_mutex));

	attrs = mpt->m_msg_dma_attr;
	attrs.dma_attr_sgllen = 1;
	attrs.dma_attr_granular = size;

	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
	    &accessp, &memp, size, NULL) == FALSE) {
		return (DDI_FAILURE);
	}

	rval = (*callback) (mpt, memp, var, accessp);

	/* FMA: a faulted handle overrides whatever the callback returned. */
	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		rval = DDI_FAILURE;
	}

	mptsas_dma_addr_destroy(&dma_handle, &accessp);
	return (rval);

}
2559 2577
/*
 * Allocate (or re-allocate after a reset) the System Request Message
 * Frame pool and record its DMA and kernel virtual addresses in the
 * soft state.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_request_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	if (mpt->m_dma_req_frame_hdl)
		mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
		    &mpt->m_acc_req_frame_hdl);

	/*
	 * The size of the request frame pool is:
	 *	Number of Request Frames * Request Frame Size
	 */
	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;

	/*
	 * set the DMA attributes.  System Request Message Frames must be
	 * aligned on a 16-byte boundry.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_align = 16;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request frame pool.
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to fill in the frame.
	 */
	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_req_frame = memp;

	/*
	 * Clear the request frame pool.
	 */
	bzero(mpt->m_req_frame, mem_size);

	return (DDI_SUCCESS);
}
2613 2631
/*
 * Allocate (or re-allocate) the request sense buffer pool: one fixed
 * buffer per request frame (less two) plus an extra region, managed by
 * an rmalloc map, for extended sense requests.  Returns DDI_SUCCESS or
 * DDI_FAILURE.
 */
static int
mptsas_alloc_sense_bufs(mptsas_t *mpt)
{
	ddi_dma_attr_t		sense_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;
	int			num_extrqsense_bufs;

	/*
	 * re-alloc when it has already alloced
	 */
	if (mpt->m_dma_req_sense_hdl) {
		rmfreemap(mpt->m_erqsense_map);
		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
		    &mpt->m_acc_req_sense_hdl);
	}

	/*
	 * The size of the request sense pool is:
	 *	(Number of Request Frames - 2 ) * Request Sense Size +
	 *	extra memory for extended sense requests.
	 */
	mem_size = ((mpt->m_max_requests - 2) * mpt->m_req_sense_size) +
	    mptsas_extreq_sense_bufsize;

	/*
	 * set the DMA attributes.  ARQ buffers
	 * aligned on a 16-byte boundry.
	 */
	sense_dma_attrs = mpt->m_msg_dma_attr;
	sense_dma_attrs.dma_attr_align = 16;
	sense_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the request sense buffer pool.
	 */
	if (mptsas_dma_addr_create(mpt, sense_dma_attrs,
	    &mpt->m_dma_req_sense_hdl, &mpt->m_acc_req_sense_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the request sense base memory address.  This chip uses this
	 * address to dma the request sense data.  The second
	 * address is the address mpt uses to access the data.
	 * The third is the base for the extended rqsense buffers.
	 */
	mpt->m_req_sense_dma_addr = cookie.dmac_laddress;
	mpt->m_req_sense = memp;
	memp += (mpt->m_max_requests - 2) * mpt->m_req_sense_size;
	mpt->m_extreq_sense = memp;

	/*
	 * The extra memory is divided up into multiples of the base
	 * buffer size in order to allocate via rmalloc().
	 * Note that the rmallocmap cannot start at zero!
	 */
	num_extrqsense_bufs = mptsas_extreq_sense_bufsize /
	    mpt->m_req_sense_size;
	mpt->m_erqsense_map = rmallocmap_wait(num_extrqsense_bufs);
	rmfree(mpt->m_erqsense_map, num_extrqsense_bufs, 1);

	/*
	 * Clear the pool.
	 */
	bzero(mpt->m_req_sense, mem_size);

	return (DDI_SUCCESS);
}
2685 2703
/*
 * Allocate (or re-allocate) the System Reply Message Frame pool and
 * record its DMA and kernel virtual addresses in the soft state.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_alloc_reply_frames(mptsas_t *mpt)
{
	ddi_dma_attr_t		frame_dma_attrs;
	caddr_t			memp;
	ddi_dma_cookie_t	cookie;
	size_t			mem_size;

	/*
	 * re-alloc when it has already alloced
	 */
	if (mpt->m_dma_reply_frame_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
		    &mpt->m_acc_reply_frame_hdl);
	}

	/*
	 * The size of the reply frame pool is:
	 *	Number of Reply Frames * Reply Frame Size
	 */
	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;

	/*
	 * set the DMA attributes.  System Reply Message Frames must be
	 * aligned on a 4-byte boundry.  This is the default.
	 */
	frame_dma_attrs = mpt->m_msg_dma_attr;
	frame_dma_attrs.dma_attr_sgllen = 1;

	/*
	 * allocate the reply frame pool
	 */
	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
	    &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
	    mem_size, &cookie) == FALSE) {
		return (DDI_FAILURE);
	}

	/*
	 * Store the reply frame memory address.  This chip uses this
	 * address to dma to and from the driver's frame.  The second
	 * address is the address mpt uses to process the frame.
	 */
	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
	mpt->m_reply_frame = memp;

	/*
	 * Clear the reply frame pool.
	 */
	bzero(mpt->m_reply_frame, mem_size);

	return (DDI_SUCCESS);
}
2739 2757
2740 2758 static int
2741 2759 mptsas_alloc_free_queue(mptsas_t *mpt)
2742 2760 {
2743 2761 ddi_dma_attr_t frame_dma_attrs;
2744 2762 caddr_t memp;
2745 2763 ddi_dma_cookie_t cookie;
2746 2764 size_t mem_size;
2747 2765
2748 2766 /*
2749 2767 * re-alloc when it has already alloced
2750 2768 */
2751 2769 if (mpt->m_dma_free_queue_hdl) {
2752 2770 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2753 2771 &mpt->m_acc_free_queue_hdl);
2754 2772 }
2755 2773
2756 2774 /*
2757 2775 * The reply free queue size is:
2758 2776 * Reply Free Queue Depth * 4
2759 2777 * The "4" is the size of one 32 bit address (low part of 64-bit
2760 2778 * address)
2761 2779 */
2762 2780 mem_size = mpt->m_free_queue_depth * 4;
2763 2781
2764 2782 /*
2765 2783 * set the DMA attributes The Reply Free Queue must be aligned on a
2766 2784 * 16-byte boundry.
2767 2785 */
2768 2786 frame_dma_attrs = mpt->m_msg_dma_attr;
2769 2787 frame_dma_attrs.dma_attr_align = 16;
2770 2788 frame_dma_attrs.dma_attr_sgllen = 1;
2771 2789
2772 2790 /*
2773 2791 * allocate the reply free queue
2774 2792 */
2775 2793 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2776 2794 &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2777 2795 mem_size, &cookie) == FALSE) {
2778 2796 return (DDI_FAILURE);
2779 2797 }
2780 2798
2781 2799 /*
2782 2800 * Store the reply free queue memory address. This chip uses this
2783 2801 * address to read from the reply free queue. The second address
2784 2802 * is the address mpt uses to manage the queue.
2785 2803 */
2786 2804 mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2787 2805 mpt->m_free_queue = memp;
2788 2806
2789 2807 /*
2790 2808 * Clear the reply free queue memory.
2791 2809 */
2792 2810 bzero(mpt->m_free_queue, mem_size);
2793 2811
2794 2812 return (DDI_SUCCESS);
2795 2813 }
2796 2814
2797 2815 static int
2798 2816 mptsas_alloc_post_queue(mptsas_t *mpt)
2799 2817 {
2800 2818 ddi_dma_attr_t frame_dma_attrs;
2801 2819 caddr_t memp;
2802 2820 ddi_dma_cookie_t cookie;
2803 2821 size_t mem_size;
2804 2822
2805 2823 /*
2806 2824 * re-alloc when it has already alloced
2807 2825 */
2808 2826 if (mpt->m_dma_post_queue_hdl) {
2809 2827 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2810 2828 &mpt->m_acc_post_queue_hdl);
2811 2829 }
2812 2830
2813 2831 /*
2814 2832 * The reply descriptor post queue size is:
2815 2833 * Reply Descriptor Post Queue Depth * 8
2816 2834 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2817 2835 */
2818 2836 mem_size = mpt->m_post_queue_depth * 8;
2819 2837
2820 2838 /*
2821 2839 * set the DMA attributes. The Reply Descriptor Post Queue must be
2822 2840 * aligned on a 16-byte boundry.
2823 2841 */
2824 2842 frame_dma_attrs = mpt->m_msg_dma_attr;
2825 2843 frame_dma_attrs.dma_attr_align = 16;
2826 2844 frame_dma_attrs.dma_attr_sgllen = 1;
2827 2845
2828 2846 /*
2829 2847 * allocate the reply post queue
2830 2848 */
2831 2849 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2832 2850 &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2833 2851 mem_size, &cookie) == FALSE) {
2834 2852 return (DDI_FAILURE);
2835 2853 }
2836 2854
2837 2855 /*
2838 2856 * Store the reply descriptor post queue memory address. This chip
2839 2857 * uses this address to write to the reply descriptor post queue. The
2840 2858 * second address is the address mpt uses to manage the queue.
2841 2859 */
2842 2860 mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2843 2861 mpt->m_post_queue = memp;
2844 2862
2845 2863 /*
2846 2864 * Clear the reply post queue memory.
2847 2865 */
2848 2866 bzero(mpt->m_post_queue, mem_size);
2849 2867
2850 2868 return (DDI_SUCCESS);
2851 2869 }
2852 2870
2853 2871 static void
2854 2872 mptsas_alloc_reply_args(mptsas_t *mpt)
2855 2873 {
2856 2874 if (mpt->m_replyh_args == NULL) {
2857 2875 mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2858 2876 mpt->m_max_replies, KM_SLEEP);
2859 2877 }
2860 2878 }
2861 2879
2862 2880 static int
2863 2881 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2864 2882 {
2865 2883 mptsas_cache_frames_t *frames = NULL;
2866 2884 if (cmd->cmd_extra_frames == NULL) {
2867 2885 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2868 2886 if (frames == NULL) {
2869 2887 return (DDI_FAILURE);
2870 2888 }
2871 2889 cmd->cmd_extra_frames = frames;
2872 2890 }
2873 2891 return (DDI_SUCCESS);
2874 2892 }
2875 2893
2876 2894 static void
2877 2895 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2878 2896 {
2879 2897 if (cmd->cmd_extra_frames) {
2880 2898 kmem_cache_free(mpt->m_cache_frames,
2881 2899 (void *)cmd->cmd_extra_frames);
2882 2900 cmd->cmd_extra_frames = NULL;
2883 2901 }
2884 2902 }
2885 2903
/*
 * Undo config-space setup: release the register mapping held in
 * mpt->m_datap.
 */
static void
mptsas_cfg_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_cfg_fini"));
	ddi_regs_map_free(&mpt->m_datap);
}
2892 2910
/*
 * Release all DMA memory and per-reply bookkeeping allocated for the
 * HBA: request frames, request sense buffers (and their rmalloc map),
 * reply frames, the reply free queue, the reply descriptor post queue
 * and the reply handler argument array.  Each resource is freed only
 * if present, so a partially initialized instance is handled safely.
 */
static void
mptsas_hba_fini(mptsas_t *mpt)
{
	NDBG0(("mptsas_hba_fini"));

	/*
	 * Free up any allocated memory
	 */
	if (mpt->m_dma_req_frame_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
		    &mpt->m_acc_req_frame_hdl);
	}

	/* The extended request-sense rmalloc map goes with its pool. */
	if (mpt->m_dma_req_sense_hdl) {
		rmfreemap(mpt->m_erqsense_map);
		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
		    &mpt->m_acc_req_sense_hdl);
	}

	if (mpt->m_dma_reply_frame_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
		    &mpt->m_acc_reply_frame_hdl);
	}

	if (mpt->m_dma_free_queue_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
		    &mpt->m_acc_free_queue_hdl);
	}

	if (mpt->m_dma_post_queue_hdl) {
		mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
		    &mpt->m_acc_post_queue_hdl);
	}

	if (mpt->m_replyh_args != NULL) {
		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
		    * mpt->m_max_replies);
	}
}
2932 2950
2933 2951 static int
2934 2952 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
2935 2953 {
2936 2954 int lun = 0;
2937 2955 char *sas_wwn = NULL;
2938 2956 int phynum = -1;
2939 2957 int reallen = 0;
2940 2958
2941 2959 /* Get the target num */
2942 2960 lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2943 2961 LUN_PROP, 0);
2944 2962
2945 2963 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
2946 2964 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
2947 2965 /*
2948 2966 * Stick in the address of form "pPHY,LUN"
2949 2967 */
2950 2968 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
2951 2969 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
2952 2970 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
2953 2971 == DDI_PROP_SUCCESS) {
2954 2972 /*
2955 2973 * Stick in the address of the form "wWWN,LUN"
2956 2974 */
2957 2975 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
2958 2976 ddi_prop_free(sas_wwn);
2959 2977 } else {
2960 2978 return (DDI_FAILURE);
2961 2979 }
2962 2980
2963 2981 ASSERT(reallen < len);
2964 2982 if (reallen >= len) {
2965 2983 mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
2966 2984 "length too small, it needs to be %d bytes", reallen + 1);
2967 2985 }
2968 2986 return (DDI_SUCCESS);
2969 2987 }
2970 2988
/*
 * tran_tgt_init(9E) - target device instance initialization
 *
 * Resolves the target's (WWN, phymask) address from devinfo/mdi
 * properties, looks the target up in m_targets, and attaches the
 * per-target private data to the tran handle.  For direct-attached
 * SATA/ATAPI devices it additionally fetches VPD page 0x89 and
 * overrides the SCSA "inquiry-*" properties with the real ATA
 * vendor/product/firmware strings.
 */
static int
mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_tran))
#endif

	/*
	 * At this point, the scsi_device structure already exists
	 * and has been initialized.
	 *
	 * Use this function to allocate target-private data structures,
	 * if needed by this HBA. Add revised flow-control and queue
	 * properties for child here, if desired and if you can tell they
	 * support tagged queueing by now.
	 */
	mptsas_t		*mpt;
	int			lun = sd->sd_address.a_lun;
	mdi_pathinfo_t		*pip = NULL;
	mptsas_tgt_private_t	*tgt_private = NULL;
	mptsas_target_t		*ptgt = NULL;
	char			*psas_wwn = NULL;
	mptsas_phymask_t	phymask = 0;
	uint64_t		sas_wwn = 0;
	mptsas_target_addr_t	addr;
	mpt = SDEV2MPT(sd);

	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);

	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
	    (void *)hba_dip, (void *)tgt_dip, lun));

	/*
	 * Non-persistent (.conf) nodes: merge their properties into the
	 * matching persistent node and fail so the duplicate goes away.
	 */
	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
		ddi_set_name_addr(tgt_dip, NULL);
		return (DDI_FAILURE);
	}
	/*
	 * phymask is 0 means the virtual port for RAID
	 */
	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
	    "phymask", 0);
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		/* mpxio client: read LUN and WWN from the pathinfo node. */
		if ((pip = (void *)(sd->sd_private)) == NULL) {
			/*
			 * Very bad news if this occurs. Somehow scsi_vhci has
			 * lost the pathinfo node for this target.
			 */
			return (DDI_NOT_WELL_FORMED);
		}

		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
			return (DDI_FAILURE);
		}

		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    &psas_wwn) == MDI_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			(void) mdi_prop_free(psas_wwn);
		}
	} else {
		/* Non-mpxio path: read LUN and WWN from the devinfo node. */
		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, LUN_PROP, 0);
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
		    DDI_PROP_SUCCESS) {
			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
				sas_wwn = 0;
			}
			ddi_prop_free(psas_wwn);
		} else {
			sas_wwn = 0;
		}
	}

	ASSERT((sas_wwn != 0) || (phymask != 0));
	addr.mta_wwn = sas_wwn;
	addr.mta_phymask = phymask;
	/* Look the target up by (WWN, phymask) under the instance lock. */
	mutex_enter(&mpt->m_mutex);
	ptgt = refhash_lookup(mpt->m_targets, &addr);
	mutex_exit(&mpt->m_mutex);
	if (ptgt == NULL) {
		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
		    sas_wwn);
		return (DDI_FAILURE);
	}
	/* Attach per-target private data (freed in mptsas_scsi_tgt_free). */
	if (hba_tran->tran_tgt_private == NULL) {
		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
		    KM_SLEEP);
		tgt_private->t_lun = lun;
		tgt_private->t_private = ptgt;
		hba_tran->tran_tgt_private = tgt_private;
	}

	/* mpxio clients are done; no SATA inquiry fixup for them. */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (DDI_SUCCESS);
	}
	mutex_enter(&mpt->m_mutex);

	if (ptgt->m_deviceinfo &
	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
		uchar_t *inq89 = NULL;
		int inq89_len = 0x238;
		int reallen = 0;
		int rval = 0;
		struct sata_id *sid = NULL;
		char model[SATA_ID_MODEL_LEN + 1];
		char fw[SATA_ID_FW_LEN + 1];
		char *vid, *pid;
		int i;

		mutex_exit(&mpt->m_mutex);
		/*
		 * According SCSI/ATA Translation -2 (SAT-2) revision 01a
		 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
		 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
		 */
		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
		    inq89, inq89_len, &reallen, 1);

		if (rval != 0) {
			if (inq89 != NULL) {
				kmem_free(inq89, inq89_len);
			}

			/*
			 * Inquiry failure is not fatal to tgt_init; the
			 * node just keeps the generic inquiry strings.
			 */
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
			return (DDI_SUCCESS);
		}
		/* IDENTIFY data starts at offset 60 of VPD page 0x89. */
		sid = (void *)(&inq89[60]);

		/* ATA strings are byte-swapped; fix and NUL-terminate. */
		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);

		model[SATA_ID_MODEL_LEN] = 0;
		fw[SATA_ID_FW_LEN] = 0;

		/*
		 * split model into vid/pid at the first whitespace
		 */
		for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
			if ((*pid == ' ') || (*pid == '\t'))
				break;
		if (i < SATA_ID_MODEL_LEN) {
			vid = model;
			/*
			 * terminate vid, establish pid
			 */
			*pid++ = 0;
		} else {
			/*
			 * vid will stay "ATA     ", the rule is same
			 * as sata framework implementation.
			 */
			vid = NULL;
			/*
			 * model is all pid
			 */
			pid = model;
		}

		/*
		 * override SCSA "inquiry-*" properties
		 */
		if (vid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_VENDOR_ID, vid, strlen(vid));
		if (pid)
			(void) scsi_device_prop_update_inqstring(sd,
			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
		(void) scsi_device_prop_update_inqstring(sd,
		    INQUIRY_REVISION_ID, fw, strlen(fw));

		if (inq89 != NULL) {
			kmem_free(inq89, inq89_len);
		}
	} else {
		mutex_exit(&mpt->m_mutex);
	}

	return (DDI_SUCCESS);
}
/*
 * tran_tgt_free(9E) - target device instance deallocation
 *
 * Releases the per-target private data allocated in
 * mptsas_scsi_tgt_init() and clears the tran handle's pointer so it
 * cannot be freed twice.
 */
static void
mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
#endif

	mptsas_tgt_private_t	*tgt_private = hba_tran->tran_tgt_private;

	if (tgt_private != NULL) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
		hba_tran->tran_tgt_private = NULL;
	}
}
3182 3200
3183 3201 /*
3184 3202 * scsi_pkt handling
3185 3203 *
3186 3204 * Visible to the external world via the transport structure.
3187 3205 */
3188 3206
/*
 * Notes:
 *	- transport the command to the addressed SCSI target/lun device
 *	- normal operation is to schedule the command to be transported,
 *	  and return TRAN_ACCEPT if this is successful.
 *	- if NO_INTR, tran_start must poll device for command completion
 *
 * Returns TRAN_ACCEPT, TRAN_BUSY (path in transition, retryable) or
 * TRAN_FATAL_ERROR (no/invalid target) as appropriate.
 */
static int
mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	mptsas_t	*mpt = PKT2MPT(pkt);
	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
	int		rval;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
	ASSERT(ptgt);
	if (ptgt == NULL)
		return (TRAN_FATAL_ERROR);

	/*
	 * prepare the pkt before taking mutex.
	 */
	rval = mptsas_prepare_pkt(cmd);
	if (rval != TRAN_ACCEPT) {
		return (rval);
	}

	/*
	 * Send the command to target/lun, however your HBA requires it.
	 * If busy, return TRAN_BUSY; if there's some other formatting error
	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
	 * return of TRAN_ACCEPT.
	 *
	 * Remember that access to shared resources, including the mptsas_t
	 * data structure and the HBA hardware registers, must be protected
	 * with mutexes, here and everywhere.
	 *
	 * Also remember that at interrupt time, you'll get an argument
	 * to the interrupt handler which is a pointer to your mptsas_t
	 * structure; you'll have to remember which commands are outstanding
	 * and which scsi_pkt is the currently-running command so the
	 * interrupt handler can refer to the pkt to set completion
	 * status, call the target driver back through pkt_comp, etc.
	 *
	 * If the instance lock is held by other thread, don't spin to wait
	 * for it. Instead, queue the cmd and next time when the instance lock
	 * is not held, accept all the queued cmd. A extra tx_waitq is
	 * introduced to protect the queue.
	 *
	 * The polled cmd will not be queued and accepted as usual.
	 *
	 * Under the tx_waitq mutex, record whether a thread is draining
	 * the tx_waitq.  An IO requesting thread that finds the instance
	 * mutex contended appends to the tx_waitq and while holding the
	 * tx_wait mutex, if the draining flag is not set, sets it and then
	 * proceeds to spin for the instance mutex. This scheme ensures that
	 * the last cmd in a burst be processed.
	 *
	 * we enable this feature only when the helper threads are enabled,
	 * at which we think the loads are heavy.
	 *
	 * per instance mutex m_tx_waitq_mutex is introduced to protect the
	 * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
	 */

	if (mpt->m_doneq_thread_n) {
		/* Fast path: instance lock uncontended, submit directly. */
		if (mutex_tryenter(&mpt->m_mutex) != 0) {
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			/* Polled commands must not be deferred; block. */
			mutex_enter(&mpt->m_mutex);
			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
			mutex_exit(&mpt->m_mutex);
		} else {
			mutex_enter(&mpt->m_tx_waitq_mutex);
			/*
			 * ptgt->m_dr_flag is protected by m_mutex or
			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
			 * is acquired.
			 */
			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
					/*
					 * The command should be allowed to
					 * retry by returning TRAN_BUSY to
					 * stall the I/O's which come from
					 * scsi_vhci since the device/path is
					 * in unstable state now.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_BUSY);
				} else {
					/*
					 * The device is offline, just fail the
					 * command by returning
					 * TRAN_FATAL_ERROR.
					 */
					mutex_exit(&mpt->m_tx_waitq_mutex);
					return (TRAN_FATAL_ERROR);
				}
			}
			if (mpt->m_tx_draining) {
				/*
				 * Another thread is draining; just append.
				 * rval is still TRAN_ACCEPT from
				 * mptsas_prepare_pkt() above.
				 */
				cmd->cmd_flags |= CFLAG_TXQ;
				*mpt->m_tx_waitqtail = cmd;
				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
				mutex_exit(&mpt->m_tx_waitq_mutex);
			} else { /* drain the queue */
				mpt->m_tx_draining = 1;
				mutex_exit(&mpt->m_tx_waitq_mutex);
				mutex_enter(&mpt->m_mutex);
				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
				mutex_exit(&mpt->m_mutex);
			}
		}
	} else {
		mutex_enter(&mpt->m_mutex);
		/*
		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
		 * in this case, m_mutex is acquired.
		 */
		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
				/*
				 * commands should be allowed to retry by
				 * returning TRAN_BUSY to stall the I/O's
				 * which come from scsi_vhci since the device/
				 * path is in unstable state now.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_BUSY);
			} else {
				/*
				 * The device is offline, just fail the
				 * command by returning TRAN_FATAL_ERROR.
				 */
				mutex_exit(&mpt->m_mutex);
				return (TRAN_FATAL_ERROR);
			}
		}
		rval = mptsas_accept_pkt(mpt, cmd);
		mutex_exit(&mpt->m_mutex);
	}

	return (rval);
}
3338 3356
3339 3357 /*
3340 3358 * Accept all the queued cmds(if any) before accept the current one.
3341 3359 */
3342 3360 static int
3343 3361 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3344 3362 {
3345 3363 int rval;
3346 3364 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3347 3365
3348 3366 ASSERT(mutex_owned(&mpt->m_mutex));
3349 3367 /*
3350 3368 * The call to mptsas_accept_tx_waitq() must always be performed
3351 3369 * because that is where mpt->m_tx_draining is cleared.
3352 3370 */
3353 3371 mutex_enter(&mpt->m_tx_waitq_mutex);
3354 3372 mptsas_accept_tx_waitq(mpt);
3355 3373 mutex_exit(&mpt->m_tx_waitq_mutex);
3356 3374 /*
3357 3375 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3358 3376 * in this case, m_mutex is acquired.
3359 3377 */
3360 3378 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3361 3379 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3362 3380 /*
3363 3381 * The command should be allowed to retry by returning
3364 3382 * TRAN_BUSY to stall the I/O's which come from
3365 3383 * scsi_vhci since the device/path is in unstable state
3366 3384 * now.
3367 3385 */
3368 3386 return (TRAN_BUSY);
3369 3387 } else {
3370 3388 /*
3371 3389 * The device is offline, just fail the command by
3372 3390 * return TRAN_FATAL_ERROR.
3373 3391 */
3374 3392 return (TRAN_FATAL_ERROR);
3375 3393 }
3376 3394 }
3377 3395 rval = mptsas_accept_pkt(mpt, cmd);
3378 3396
3379 3397 return (rval);
3380 3398 }
3381 3399
/*
 * Accept a prepared command for execution: restore the target throttle
 * if draining has completed, reject or retry commands whose DevHandle
 * is invalid (reset in progress vs. device gone), then either start the
 * command immediately (free slot, throttle allows, no reset delay, not
 * polled) or place it on the wait queue.  Polled (FLAG_NOINTR) commands
 * are waited for here.  Called with m_mutex held.  Returns TRAN_ACCEPT,
 * TRAN_BUSY or TRAN_FATAL_ERROR.
 */
static int
mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	int		rval = TRAN_ACCEPT;
	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;

	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));

	ASSERT(mutex_owned(&mpt->m_mutex));

	/* Commands resubmitted internally may not be prepared yet. */
	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
		rval = mptsas_prepare_pkt(cmd);
		if (rval != TRAN_ACCEPT) {
			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
			return (rval);
		}
	}

	/*
	 * reset the throttle if we were draining
	 */
	if ((ptgt->m_t_ncmds == 0) &&
	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
		NDBG23(("reset throttle"));
		ASSERT(ptgt->m_reset_delay == 0);
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/*
	 * If HBA is being reset, the DevHandles are being re-initialized,
	 * which means that they could be invalid even if the target is still
	 * attached.  Check if being reset and if DevHandle is being
	 * re-initialized.  If this is the case, return BUSY so the I/O can be
	 * retried later.
	 */
	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
		/*
		 * TXQ commands have already been accepted; complete them
		 * through the done queue instead of bouncing them back.
		 */
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_BUSY);
		}
	}

	/*
	 * If device handle has already been invalidated, just
	 * fail the command. In theory, command from scsi_vhci
	 * client is impossible send down command with invalid
	 * devhdl since devhdl is set after path offline, target
	 * driver is not suppose to select a offlined path.
	 */
	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
		NDBG3(("rejecting command, it might because invalid devhdl "
		    "request."));
		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
		if (cmd->cmd_flags & CFLAG_TXQ) {
			mptsas_doneq_add(mpt, cmd);
			mptsas_doneq_empty(mpt);
			return (rval);
		} else {
			return (TRAN_FATAL_ERROR);
		}
	}
	/*
	 * The first case is the normal case. mpt gets a command from the
	 * target driver and starts it.
	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
	 * commands is m_max_requests - 2.
	 */
	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
	    (ptgt->m_reset_delay == 0) &&
	    (ptgt->m_t_nwait == 0) &&
	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
			(void) mptsas_start_cmd(mpt, cmd);
		} else {
			/* No free slot right now; queue for later. */
			mptsas_waitq_add(mpt, cmd);
		}
	} else {
		/*
		 * Add this pkt to the work queue
		 */
		mptsas_waitq_add(mpt, cmd);

		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);

			/*
			 * Only flush the doneq if this is not a TM
			 * cmd.  For TM cmds the flushing of the
			 * doneq will be done in those routines.
			 */
			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
				mptsas_doneq_empty(mpt);
			}
		}
	}
	return (rval);
}
3485 3503
/*
 * Reserve a request slot (SMID) for the command using a rotor over the
 * normal slots.  On success, records the slot in cmd->cmd_slot, bumps
 * the command counts and initializes the expiration time, and returns
 * TRUE; returns FALSE when every normal slot is occupied.  Called with
 * m_mutex held.
 */
int
mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_slots_t *slots = mpt->m_active;
	uint_t slot, start_rotor;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	/*
	 * Account for reserved TM request slot and reserved SMID of 0.
	 */
	ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));

	/*
	 * Find the next available slot, beginning at m_rotor.  If no slot is
	 * available, we'll return FALSE to indicate that.  This mechanism
	 * considers only the normal slots, not the reserved slot 0 nor the
	 * task management slot m_n_normal + 1.  The rotor is left to point to
	 * the normal slot after the one we select, unless we select the last
	 * normal slot in which case it returns to slot 1.
	 */
	start_rotor = slots->m_rotor;
	do {
		slot = slots->m_rotor++;
		if (slots->m_rotor > slots->m_n_normal)
			slots->m_rotor = 1;

		/* Wrapped all the way around: every slot is busy. */
		if (slots->m_rotor == start_rotor)
			break;
	} while (slots->m_slot[slot] != NULL);

	if (slots->m_slot[slot] != NULL)
		return (FALSE);

	ASSERT(slot != 0 && slot <= slots->m_n_normal);

	cmd->cmd_slot = slot;
	slots->m_slot[slot] = cmd;
	mpt->m_ncmds++;

	/*
	 * only increment per target ncmds if this is not a
	 * command that has no target associated with it (i.e. a
	 * event acknowledgment)
	 */
	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
		/*
		 * Expiration time is set in mptsas_start_cmd
		 */
		ptgt->m_t_ncmds++;
		cmd->cmd_active_expiration = 0;
	} else {
		/*
		 * Initialize expiration time for passthrough commands,
		 */
		cmd->cmd_active_expiration = gethrtime() +
		    (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
	}
	return (TRUE);
}
3547 3565
3548 3566 /*
3549 3567 * prepare the pkt:
3550 3568 * the pkt may have been resubmitted or just reused so
3551 3569 * initialize some fields and do some checks.
3552 3570 */
3553 3571 static int
3554 3572 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3555 3573 {
3556 3574 struct scsi_pkt *pkt = CMD2PKT(cmd);
3557 3575
3558 3576 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3559 3577
3560 3578 /*
3561 3579 * Reinitialize some fields that need it; the packet may
3562 3580 * have been resubmitted
3563 3581 */
3564 3582 pkt->pkt_reason = CMD_CMPLT;
3565 3583 pkt->pkt_state = 0;
3566 3584 pkt->pkt_statistics = 0;
3567 3585 pkt->pkt_resid = 0;
3568 3586 cmd->cmd_age = 0;
3569 3587 cmd->cmd_pkt_flags = pkt->pkt_flags;
3570 3588
3571 3589 /*
3572 3590 * zero status byte.
3573 3591 */
3574 3592 *(pkt->pkt_scbp) = 0;
3575 3593
3576 3594 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3577 3595 pkt->pkt_resid = cmd->cmd_dmacount;
3578 3596
3579 3597 /*
3580 3598 * consistent packets need to be sync'ed first
3581 3599 * (only for data going out)
3582 3600 */
3583 3601 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3584 3602 (cmd->cmd_flags & CFLAG_DMASEND)) {
3585 3603 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3586 3604 DDI_DMA_SYNC_FORDEV);
3587 3605 }
3588 3606 }
3589 3607
3590 3608 cmd->cmd_flags =
3591 3609 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3592 3610 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3593 3611
3594 3612 return (TRAN_ACCEPT);
3595 3613 }
3596 3614
/*
 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
 *
 * One of three possibilities:
 *	- allocate scsi_pkt
 *	- allocate scsi_pkt and DMA resources
 *	- allocate DMA resources to an already-allocated pkt
 */
static struct scsi_pkt *
mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	mptsas_cmd_t	*cmd, *new_cmd;
	mptsas_t	*mpt = ADDR2MPT(ap);
	int		failure = 1;
	uint_t		oldcookiec;
	mptsas_target_t	*ptgt = NULL;
	int		rval;
	mptsas_tgt_private_t	*tgt_private;
	int		kf;

	/* Map the scsi(9E) callback convention onto kmem sleep flags. */
	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;

	/*
	 * Resolve the per-target private data hung off the tran and
	 * refresh the address's target/lun from it.  The NULL checks
	 * after the ASSERTs keep non-DEBUG kernels from dereferencing
	 * a bad tran setup.
	 */
	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
	    tran_tgt_private;
	ASSERT(tgt_private != NULL);
	if (tgt_private == NULL) {
		return (NULL);
	}
	ptgt = tgt_private->t_private;
	ASSERT(ptgt != NULL);
	if (ptgt == NULL)
		return (NULL);
	ap->a_target = ptgt->m_devhdl;
	ap->a_lun = tgt_private->t_lun;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
#ifdef MPTSAS_TEST_EXTRN_ALLOC
	/* Test hook: force the external-allocation path below. */
	statuslen *= 100; tgtlen *= 4;
#endif
	NDBG3(("mptsas_scsi_init_pkt:\n"
	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
	    ap->a_target, (void *)pkt, (void *)bp,
	    cmdlen, statuslen, tgtlen, flags));

	/*
	 * Allocate the new packet.
	 */
	if (pkt == NULL) {
		ddi_dma_handle_t save_dma_handle;

		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);

		if (cmd) {
			/*
			 * The cache constructor attached a DMA handle to
			 * the cmd; preserve it across the bzero.  The
			 * scsi_pkt lives immediately after the cmd in the
			 * same cache allocation.
			 */
			save_dma_handle = cmd->cmd_dmahandle;
			bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
			cmd->cmd_dmahandle = save_dma_handle;

			pkt = (void *)((uchar_t *)cmd +
			    sizeof (struct mptsas_cmd));
			pkt->pkt_ha_private = (opaque_t)cmd;
			pkt->pkt_address = *ap;
			pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
			pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
			pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
			cmd->cmd_pkt = (struct scsi_pkt *)pkt;
			cmd->cmd_cdblen = (uchar_t)cmdlen;
			cmd->cmd_scblen = statuslen;
			cmd->cmd_rqslen = SENSE_LENGTH;
			cmd->cmd_tgt_addr = ptgt;
			failure = 0;
		}

		/*
		 * Any area larger than the built-in cmd fields needs a
		 * separate "extern" allocation.
		 */
		if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (tgtlen > PKT_PRIV_LEN) ||
		    (statuslen > EXTCMDS_STATUS_SIZE)) {
			if (failure == 0) {
				/*
				 * if extern alloc fails, all will be
				 * deallocated, including cmd
				 */
				failure = mptsas_pkt_alloc_extern(mpt, cmd,
				    cmdlen, tgtlen, statuslen, kf);
			}
			if (failure) {
				/*
				 * if extern allocation fails, it will
				 * deallocate the new pkt as well
				 */
				return (NULL);
			}
		}
		new_cmd = cmd;

	} else {
		/* Caller supplied the pkt; only DMA resources needed. */
		cmd = PKT2CMD(pkt);
		new_cmd = NULL;
	}


	/* grab cmd->cmd_cookiec here as oldcookiec */

	oldcookiec = cmd->cmd_cookiec;

	/*
	 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
	 * greater than 0 and we'll need to grab the next dma window
	 */
	/*
	 * SLM-not doing extra command frame right now; may add later
	 */

	if (cmd->cmd_nwin > 0) {

		/*
		 * Make sure we haven't gone past the total number
		 * of windows
		 */
		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
			return (NULL);
		}
		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
			return (NULL);
		}
		goto get_dma_cookies;
	}


	if (flags & PKT_XARQ) {
		cmd->cmd_flags |= CFLAG_XARQ;
	}

	/*
	 * DMA resource allocation.  This version assumes your
	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
	 */
	if (bp && (bp->b_bcount != 0) &&
	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {

		int	cnt, dma_flags;
		mptti_t	*dmap;		/* ptr to the S/G list */

		/*
		 * Set up DMA memory and position to the next DMA segment.
		 */
		ASSERT(cmd->cmd_dmahandle != NULL);

		/* Direction of the transfer determines the bind flags. */
		if (bp->b_flags & B_READ) {
			dma_flags = DDI_DMA_READ;
			cmd->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			dma_flags = DDI_DMA_WRITE;
			cmd->cmd_flags |= CFLAG_DMASEND;
		}
		if (flags & PKT_CONSISTENT) {
			cmd->cmd_flags |= CFLAG_CMDIOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		if (flags & PKT_DMA_PARTIAL) {
			dma_flags |= DDI_DMA_PARTIAL;
		}

		/*
		 * workaround for byte hole issue on psycho and
		 * schizo pre 2.1
		 */
		if ((bp->b_flags & B_READ) && ((bp->b_flags &
		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
			dma_flags |= DDI_DMA_CONSISTENT;
		}

		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
		    dma_flags, callback, arg,
		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
		if (rval == DDI_DMA_PARTIAL_MAP) {
			/* Partial mapping: start with the first window. */
			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
			    &cmd->cmd_cookiec);
		} else if (rval && (rval != DDI_DMA_MAPPED)) {
			/* Map the bind failure onto a buf error code. */
			switch (rval) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->cmd_flags &= ~CFLAG_DMAVALID;
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

get_dma_cookies:
		cmd->cmd_flags |= CFLAG_DMAVALID;
		ASSERT(cmd->cmd_cookiec > 0);

		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
			    cmd->cmd_cookiec);
			bioerror(bp, EINVAL);
			if (new_cmd) {
				mptsas_scsi_destroy_pkt(ap, pkt);
			}
			return ((struct scsi_pkt *)NULL);
		}

		/*
		 * Allocate extra SGL buffer if needed.
		 */
		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
		    (cmd->cmd_extra_frames == NULL)) {
			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
			    DDI_FAILURE) {
				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
				    "failed");
				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		/*
		 * Always use scatter-gather transfer
		 * Use the loop below to store physical addresses of
		 * DMA segments, from the DMA cookies, into your HBA's
		 * scatter-gather list.
		 * We need to ensure we have enough kmem alloc'd
		 * for the sg entries since we are no longer using an
		 * array inside mptsas_cmd_t.
		 *
		 * We check cmd->cmd_cookiec against oldcookiec so
		 * the scatter-gather list is correctly allocated
		 */

		if (oldcookiec != cmd->cmd_cookiec) {
			if (cmd->cmd_sg != (mptti_t *)NULL) {
				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
				    oldcookiec);
				cmd->cmd_sg = NULL;
			}
		}

		if (cmd->cmd_sg == (mptti_t *)NULL) {
			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
			    cmd->cmd_cookiec), kf);

			if (cmd->cmd_sg == (mptti_t *)NULL) {
				mptsas_log(mpt, CE_WARN,
				    "unable to kmem_alloc enough memory "
				    "for scatter/gather list");
				/*
				 * if we have an ENOMEM condition we need to
				 * behave the same way as the rest of this
				 * routine
				 */

				bioerror(bp, ENOMEM);
				if (new_cmd) {
					mptsas_scsi_destroy_pkt(ap, pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		}

		dmap = cmd->cmd_sg;

		ASSERT(cmd->cmd_cookie.dmac_size != 0);

		/*
		 * store the first segment into the S/G list
		 */
		dmap->count = cmd->cmd_cookie.dmac_size;
		dmap->addr.address64.Low = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
		dmap->addr.address64.High = (uint32_t)
		    (cmd->cmd_cookie.dmac_laddress >> 32);

		/*
		 * dmacount counts the size of the dma for this window
		 * (if partial dma is being used).  totaldmacount
		 * keeps track of the total amount of dma we have
		 * transferred for all the windows (needed to calculate
		 * the resid value below).
		 */
		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

		/*
		 * We already stored the first DMA scatter gather segment,
		 * start at 1 if we need to store more.
		 */
		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
			/*
			 * Get next DMA cookie
			 */
			ddi_dma_nextcookie(cmd->cmd_dmahandle,
			    &cmd->cmd_cookie);
			dmap++;

			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;

			/*
			 * store the segment parms into the S/G list
			 */
			dmap->count = cmd->cmd_cookie.dmac_size;
			dmap->addr.address64.Low = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
			dmap->addr.address64.High = (uint32_t)
			    (cmd->cmd_cookie.dmac_laddress >> 32);
		}

		/*
		 * If this was partially allocated we set the resid
		 * the amount of data NOT transferred in this window
		 * If there is only one window, the resid will be 0
		 */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
		NDBG3(("mptsas_scsi_init_pkt: cmd_dmacount=%d.",
		    cmd->cmd_dmacount));
	}
	return (pkt);
}
3939 3957
3940 3958 /*
3941 3959 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3942 3960 *
3943 3961 * Notes:
3944 3962 * - also frees DMA resources if allocated
3945 3963 * - implicit DMA synchonization
3946 3964 */
3947 3965 static void
3948 3966 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3949 3967 {
3950 3968 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3951 3969 mptsas_t *mpt = ADDR2MPT(ap);
3952 3970
3953 3971 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3954 3972 ap->a_target, (void *)pkt));
3955 3973
3956 3974 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3957 3975 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3958 3976 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3959 3977 }
3960 3978
3961 3979 if (cmd->cmd_sg) {
3962 3980 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3963 3981 cmd->cmd_sg = NULL;
3964 3982 }
3965 3983
3966 3984 mptsas_free_extra_sgl_frame(mpt, cmd);
3967 3985
3968 3986 if ((cmd->cmd_flags &
3969 3987 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3970 3988 CFLAG_SCBEXTERN)) == 0) {
3971 3989 cmd->cmd_flags = CFLAG_FREE;
3972 3990 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3973 3991 } else {
3974 3992 mptsas_pkt_destroy_extern(mpt, cmd);
3975 3993 }
3976 3994 }
3977 3995
3978 3996 /*
3979 3997 * kmem cache constructor and destructor:
3980 3998 * When constructing, we bzero the cmd and allocate the dma handle
3981 3999 * When destructing, just free the dma handle
3982 4000 */
3983 4001 static int
3984 4002 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3985 4003 {
3986 4004 mptsas_cmd_t *cmd = buf;
3987 4005 mptsas_t *mpt = cdrarg;
3988 4006 int (*callback)(caddr_t);
3989 4007
3990 4008 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3991 4009
3992 4010 NDBG4(("mptsas_kmem_cache_constructor"));
3993 4011
3994 4012 /*
3995 4013 * allocate a dma handle
3996 4014 */
3997 4015 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
3998 4016 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
3999 4017 cmd->cmd_dmahandle = NULL;
4000 4018 return (-1);
4001 4019 }
4002 4020 return (0);
4003 4021 }
4004 4022
4005 4023 static void
4006 4024 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
4007 4025 {
4008 4026 #ifndef __lock_lint
4009 4027 _NOTE(ARGUNUSED(cdrarg))
4010 4028 #endif
4011 4029 mptsas_cmd_t *cmd = buf;
4012 4030
4013 4031 NDBG4(("mptsas_kmem_cache_destructor"));
4014 4032
4015 4033 if (cmd->cmd_dmahandle) {
4016 4034 ddi_dma_free_handle(&cmd->cmd_dmahandle);
4017 4035 cmd->cmd_dmahandle = NULL;
4018 4036 }
4019 4037 }
4020 4038
4021 4039 static int
4022 4040 mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
4023 4041 {
4024 4042 mptsas_cache_frames_t *p = buf;
4025 4043 mptsas_t *mpt = cdrarg;
4026 4044 ddi_dma_attr_t frame_dma_attr;
4027 4045 size_t mem_size, alloc_len;
4028 4046 ddi_dma_cookie_t cookie;
4029 4047 uint_t ncookie;
4030 4048 int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
4031 4049 ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4032 4050
4033 4051 frame_dma_attr = mpt->m_msg_dma_attr;
4034 4052 frame_dma_attr.dma_attr_align = 0x10;
4035 4053 frame_dma_attr.dma_attr_sgllen = 1;
4036 4054
4037 4055 if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
4038 4056 &p->m_dma_hdl) != DDI_SUCCESS) {
4039 4057 mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
4040 4058 " extra SGL.");
4041 4059 return (DDI_FAILURE);
4042 4060 }
4043 4061
4044 4062 mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;
4045 4063
4046 4064 if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
4047 4065 DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
4048 4066 &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
4049 4067 ddi_dma_free_handle(&p->m_dma_hdl);
4050 4068 p->m_dma_hdl = NULL;
4051 4069 mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
4052 4070 " extra SGL.");
4053 4071 return (DDI_FAILURE);
4054 4072 }
4055 4073
4056 4074 if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
4057 4075 alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
4058 4076 &cookie, &ncookie) != DDI_DMA_MAPPED) {
4059 4077 (void) ddi_dma_mem_free(&p->m_acc_hdl);
4060 4078 ddi_dma_free_handle(&p->m_dma_hdl);
4061 4079 p->m_dma_hdl = NULL;
4062 4080 mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
4063 4081 " extra SGL");
4064 4082 return (DDI_FAILURE);
4065 4083 }
4066 4084
4067 4085 /*
4068 4086 * Store the SGL memory address. This chip uses this
4069 4087 * address to dma to and from the driver. The second
4070 4088 * address is the address mpt uses to fill in the SGL.
4071 4089 */
4072 4090 p->m_phys_addr = cookie.dmac_laddress;
4073 4091
4074 4092 return (DDI_SUCCESS);
4075 4093 }
4076 4094
4077 4095 static void
4078 4096 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
4079 4097 {
4080 4098 #ifndef __lock_lint
4081 4099 _NOTE(ARGUNUSED(cdrarg))
4082 4100 #endif
4083 4101 mptsas_cache_frames_t *p = buf;
4084 4102 if (p->m_dma_hdl != NULL) {
4085 4103 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
4086 4104 (void) ddi_dma_mem_free(&p->m_acc_hdl);
4087 4105 ddi_dma_free_handle(&p->m_dma_hdl);
4088 4106 p->m_phys_addr = NULL;
4089 4107 p->m_frames_addr = NULL;
4090 4108 p->m_dma_hdl = NULL;
4091 4109 p->m_acc_hdl = NULL;
4092 4110 }
4093 4111
4094 4112 }
4095 4113
4096 4114 /*
4097 4115 * Figure out if we need to use a different method for the request
4098 4116 * sense buffer and allocate from the map if necessary.
4099 4117 */
4100 4118 static boolean_t
4101 4119 mptsas_cmdarqsize(mptsas_t *mpt, mptsas_cmd_t *cmd, size_t senselength, int kf)
4102 4120 {
4103 4121 if (senselength > mpt->m_req_sense_size) {
4104 4122 unsigned long i;
4105 4123
4106 4124 /* Sense length is limited to an 8 bit value in MPI Spec. */
4107 4125 if (senselength > 255)
4108 4126 senselength = 255;
4109 4127 cmd->cmd_extrqschunks = (senselength +
4110 4128 (mpt->m_req_sense_size - 1))/mpt->m_req_sense_size;
4111 4129 i = (kf == KM_SLEEP ? rmalloc_wait : rmalloc)
4112 4130 (mpt->m_erqsense_map, cmd->cmd_extrqschunks);
4113 4131
4114 4132 if (i == 0)
4115 4133 return (B_FALSE);
4116 4134
4117 4135 cmd->cmd_extrqslen = (uint16_t)senselength;
4118 4136 cmd->cmd_extrqsidx = i - 1;
4119 4137 cmd->cmd_arq_buf = mpt->m_extreq_sense +
4120 4138 (cmd->cmd_extrqsidx * mpt->m_req_sense_size);
4121 4139 } else {
4122 4140 cmd->cmd_rqslen = (uchar_t)senselength;
4123 4141 }
4124 4142
4125 4143 return (B_TRUE);
4126 4144 }
4127 4145
4128 4146 /*
4129 4147 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
4130 4148 * for non-standard length cdb, pkt_private, status areas
4131 4149 * if allocation fails, then deallocate all external space and the pkt
4132 4150 */
4133 4151 /* ARGSUSED */
4134 4152 static int
4135 4153 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
4136 4154 int cmdlen, int tgtlen, int statuslen, int kf)
4137 4155 {
4138 4156 caddr_t cdbp, scbp, tgt;
4139 4157
4140 4158 NDBG3(("mptsas_pkt_alloc_extern: "
4141 4159 "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
4142 4160 (void *)cmd, cmdlen, tgtlen, statuslen, kf));
4143 4161
4144 4162 tgt = cdbp = scbp = NULL;
4145 4163 cmd->cmd_scblen = statuslen;
4146 4164 cmd->cmd_privlen = (uchar_t)tgtlen;
4147 4165
4148 4166 if (cmdlen > sizeof (cmd->cmd_cdb)) {
4149 4167 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
4150 4168 goto fail;
4151 4169 }
4152 4170 cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
4153 4171 cmd->cmd_flags |= CFLAG_CDBEXTERN;
4154 4172 }
4155 4173 if (tgtlen > PKT_PRIV_LEN) {
4156 4174 if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
4157 4175 goto fail;
4158 4176 }
4159 4177 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
4160 4178 cmd->cmd_pkt->pkt_private = tgt;
4161 4179 }
4162 4180 if (statuslen > EXTCMDS_STATUS_SIZE) {
4163 4181 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
4164 4182 goto fail;
4165 4183 }
4166 4184 cmd->cmd_flags |= CFLAG_SCBEXTERN;
4167 4185 cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
4168 4186
4169 4187 /* allocate sense data buf for DMA */
4170 4188 if (mptsas_cmdarqsize(mpt, cmd, statuslen -
4171 4189 MPTSAS_GET_ITEM_OFF(struct scsi_arq_status, sts_sensedata),
4172 4190 kf) == B_FALSE)
4173 4191 goto fail;
4174 4192 }
4175 4193 return (0);
4176 4194 fail:
4177 4195 mptsas_pkt_destroy_extern(mpt, cmd);
4178 4196 return (1);
4179 4197 }
4180 4198
4181 4199 /*
4182 4200 * deallocate external pkt space and deallocate the pkt
4183 4201 */
4184 4202 static void
4185 4203 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4186 4204 {
4187 4205 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4188 4206
4189 4207 if (cmd->cmd_flags & CFLAG_FREE) {
4190 4208 mptsas_log(mpt, CE_PANIC,
4191 4209 "mptsas_pkt_destroy_extern: freeing free packet");
4192 4210 _NOTE(NOT_REACHED)
4193 4211 /* NOTREACHED */
4194 4212 }
4195 4213 if (cmd->cmd_extrqslen != 0) {
4196 4214 rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
4197 4215 cmd->cmd_extrqsidx + 1);
4198 4216 }
4199 4217 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4200 4218 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4201 4219 }
4202 4220 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4203 4221 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4204 4222 }
4205 4223 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4206 4224 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4207 4225 }
4208 4226 cmd->cmd_flags = CFLAG_FREE;
4209 4227 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4210 4228 }
4211 4229
4212 4230 /*
4213 4231 * tran_sync_pkt(9E) - explicit DMA synchronization
4214 4232 */
4215 4233 /*ARGSUSED*/
4216 4234 static void
4217 4235 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4218 4236 {
4219 4237 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4220 4238
4221 4239 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4222 4240 ap->a_target, (void *)pkt));
4223 4241
4224 4242 if (cmd->cmd_dmahandle) {
4225 4243 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4226 4244 (cmd->cmd_flags & CFLAG_DMASEND) ?
4227 4245 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4228 4246 }
4229 4247 }
4230 4248
4231 4249 /*
4232 4250 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4233 4251 */
4234 4252 /*ARGSUSED*/
4235 4253 static void
4236 4254 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4237 4255 {
4238 4256 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4239 4257 mptsas_t *mpt = ADDR2MPT(ap);
4240 4258
4241 4259 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4242 4260 ap->a_target, (void *)pkt));
4243 4261
4244 4262 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4245 4263 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4246 4264 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4247 4265 }
4248 4266
4249 4267 mptsas_free_extra_sgl_frame(mpt, cmd);
4250 4268 }
4251 4269
4252 4270 static void
4253 4271 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4254 4272 {
4255 4273 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4256 4274 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4257 4275 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4258 4276 DDI_DMA_SYNC_FORCPU);
4259 4277 }
4260 4278 (*pkt->pkt_comp)(pkt);
4261 4279 }
4262 4280
4263 4281 static void
4264 4282 mptsas_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4265 4283 ddi_acc_handle_t acc_hdl, uint_t cookiec, uint32_t end_flags)
4266 4284 {
4267 4285 pMpi2SGESimple64_t sge;
4268 4286 mptti_t *dmap;
4269 4287 uint32_t flags;
4270 4288
4271 4289 dmap = cmd->cmd_sg;
4272 4290
4273 4291 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4274 4292 while (cookiec--) {
4275 4293 ddi_put32(acc_hdl,
4276 4294 &sge->Address.Low, dmap->addr.address64.Low);
4277 4295 ddi_put32(acc_hdl,
4278 4296 &sge->Address.High, dmap->addr.address64.High);
4279 4297 ddi_put32(acc_hdl, &sge->FlagsLength,
4280 4298 dmap->count);
4281 4299 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4282 4300 flags |= ((uint32_t)
4283 4301 (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4284 4302 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4285 4303 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4286 4304 MPI2_SGE_FLAGS_SHIFT);
4287 4305
4288 4306 /*
4289 4307 * If this is the last cookie, we set the flags
4290 4308 * to indicate so
4291 4309 */
4292 4310 if (cookiec == 0) {
4293 4311 flags |= end_flags;
4294 4312 }
4295 4313 if (cmd->cmd_flags & CFLAG_DMASEND) {
4296 4314 flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4297 4315 MPI2_SGE_FLAGS_SHIFT);
4298 4316 } else {
4299 4317 flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4300 4318 MPI2_SGE_FLAGS_SHIFT);
4301 4319 }
4302 4320 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4303 4321 dmap++;
4304 4322 sge++;
4305 4323 }
4306 4324 }
4307 4325
4308 4326 static void
4309 4327 mptsas_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
4310 4328 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4311 4329 {
4312 4330 pMpi2SGESimple64_t sge;
4313 4331 pMpi2SGEChain64_t sgechain;
4314 4332 uint64_t nframe_phys_addr;
4315 4333 uint_t cookiec;
4316 4334 mptti_t *dmap;
4317 4335 uint32_t flags;
4318 4336
4319 4337 /*
4320 4338 * Save the number of entries in the DMA
4321 4339 * Scatter/Gather list
4322 4340 */
4323 4341 cookiec = cmd->cmd_cookiec;
4324 4342
4325 4343 /*
4326 4344 * Hereby we start to deal with multiple frames.
4327 4345 * The process is as follows:
4328 4346 * 1. Determine how many frames are needed for SGL element
4329 4347 * storage; Note that all frames are stored in contiguous
4330 4348 * memory space and in 64-bit DMA mode each element is
4331 4349 * 3 double-words (12 bytes) long.
4332 4350 * 2. Fill up the main frame. We need to do this separately
4333 4351 * since it contains the SCSI IO request header and needs
4334 4352 * dedicated processing. Note that the last 4 double-words
4335 4353 * of the SCSI IO header is for SGL element storage
4336 4354 * (MPI2_SGE_IO_UNION).
4337 4355 * 3. Fill the chain element in the main frame, so the DMA
4338 4356 * engine can use the following frames.
4339 4357 * 4. Enter a loop to fill the remaining frames. Note that the
4340 4358 * last frame contains no chain element. The remaining
4341 4359 * frames go into the mpt SGL buffer allocated on the fly,
4342 4360 * not immediately following the main message frame, as in
4343 4361 * Gen1.
4344 4362 * Some restrictions:
4345 4363 * 1. For 64-bit DMA, the simple element and chain element
4346 4364 * are both of 3 double-words (12 bytes) in size, even
4347 4365 * though all frames are stored in the first 4G of mem
4348 4366 * range and the higher 32-bits of the address are always 0.
4349 4367 * 2. On some controllers (like the 1064/1068), a frame can
4350 4368 * hold SGL elements with the last 1 or 2 double-words
4351 4369 * (4 or 8 bytes) un-used. On these controllers, we should
4352 4370 * recognize that there's not enough room for another SGL
4353 4371 * element and move the sge pointer to the next frame.
4354 4372 */
4355 4373 int i, j, k, l, frames, sgemax;
4356 4374 int temp;
4357 4375 uint8_t chainflags;
4358 4376 uint16_t chainlength;
4359 4377 mptsas_cache_frames_t *p;
4360 4378
4361 4379 /*
4362 4380 * Sgemax is the number of SGE's that will fit
4363 4381 * each extra frame and frames is total
4364 4382 * number of frames we'll need. 1 sge entry per
4365 4383 * frame is reseverd for the chain element thus the -1 below.
4366 4384 */
4367 4385 sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
4368 4386 - 1);
4369 4387 temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4370 4388
4371 4389 /*
4372 4390 * A little check to see if we need to round up the number
4373 4391 * of frames we need
4374 4392 */
4375 4393 if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4376 4394 sgemax) > 1) {
4377 4395 frames = (temp + 1);
4378 4396 } else {
4379 4397 frames = temp;
4380 4398 }
4381 4399 dmap = cmd->cmd_sg;
4382 4400 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4383 4401
4384 4402 /*
4385 4403 * First fill in the main frame
4386 4404 */
4387 4405 j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
4388 4406 mptsas_sge_mainframe(cmd, frame, acc_hdl, j,
4389 4407 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4390 4408 MPI2_SGE_FLAGS_SHIFT));
4391 4409 dmap += j;
4392 4410 sge += j;
4393 4411 j++;
4394 4412
4395 4413 /*
4396 4414 * Fill in the chain element in the main frame.
4397 4415 * About calculation on ChainOffset:
4398 4416 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4399 4417 * in the end reserved for SGL element storage
4400 4418 * (MPI2_SGE_IO_UNION); we should count it in our
4401 4419 * calculation. See its definition in the header file.
4402 4420 * 2. Constant j is the counter of the current SGL element
4403 4421 * that will be processed, and (j - 1) is the number of
4404 4422 * SGL elements that have been processed (stored in the
4405 4423 * main frame).
4406 4424 * 3. ChainOffset value should be in units of double-words (4
4407 4425 * bytes) so the last value should be divided by 4.
4408 4426 */
4409 4427 ddi_put8(acc_hdl, &frame->ChainOffset,
4410 4428 (sizeof (MPI2_SCSI_IO_REQUEST) -
4411 4429 sizeof (MPI2_SGE_IO_UNION) +
4412 4430 (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4413 4431 sgechain = (pMpi2SGEChain64_t)sge;
4414 4432 chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4415 4433 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4416 4434 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4417 4435 ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4418 4436
4419 4437 /*
4420 4438 * The size of the next frame is the accurate size of space
4421 4439 * (in bytes) used to store the SGL elements. j is the counter
4422 4440 * of SGL elements. (j - 1) is the number of SGL elements that
4423 4441 * have been processed (stored in frames).
4424 4442 */
4425 4443 if (frames >= 2) {
4426 4444 ASSERT(mpt->m_req_frame_size >= sizeof (MPI2_SGE_SIMPLE64));
4427 4445 chainlength = mpt->m_req_frame_size /
4428 4446 sizeof (MPI2_SGE_SIMPLE64) *
4429 4447 sizeof (MPI2_SGE_SIMPLE64);
4430 4448 } else {
4431 4449 chainlength = ((cookiec - (j - 1)) *
4432 4450 sizeof (MPI2_SGE_SIMPLE64));
4433 4451 }
4434 4452
4435 4453 p = cmd->cmd_extra_frames;
4436 4454
4437 4455 ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4438 4456 ddi_put32(acc_hdl, &sgechain->Address.Low, p->m_phys_addr);
4439 4457 ddi_put32(acc_hdl, &sgechain->Address.High, p->m_phys_addr >> 32);
4440 4458
4441 4459 /*
4442 4460 * If there are more than 2 frames left we have to
4443 4461 * fill in the next chain offset to the location of
4444 4462 * the chain element in the next frame.
4445 4463 * sgemax is the number of simple elements in an extra
4446 4464 * frame. Note that the value NextChainOffset should be
4447 4465 * in double-words (4 bytes).
4448 4466 */
4449 4467 if (frames >= 2) {
4450 4468 ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4451 4469 (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4452 4470 } else {
4453 4471 ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4454 4472 }
4455 4473
4456 4474 /*
4457 4475 * Jump to next frame;
4458 4476 * Starting here, chain buffers go into the per command SGL.
4459 4477 * This buffer is allocated when chain buffers are needed.
4460 4478 */
4461 4479 sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4462 4480 i = cookiec;
4463 4481
4464 4482 /*
4465 4483 * Start filling in frames with SGE's. If we
4466 4484 * reach the end of frame and still have SGE's
4467 4485 * to fill we need to add a chain element and
4468 4486 * use another frame. j will be our counter
4469 4487 * for what cookie we are at and i will be
4470 4488 * the total cookiec. k is the current frame
4471 4489 */
4472 4490 for (k = 1; k <= frames; k++) {
4473 4491 for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4474 4492
4475 4493 /*
4476 4494 * If we have reached the end of frame
4477 4495 * and we have more SGE's to fill in
4478 4496 * we have to fill the final entry
4479 4497 * with a chain element and then
4480 4498 * continue to the next frame
4481 4499 */
4482 4500 if ((l == (sgemax + 1)) && (k != frames)) {
4483 4501 sgechain = (pMpi2SGEChain64_t)sge;
4484 4502 j--;
4485 4503 chainflags = (
4486 4504 MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4487 4505 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4488 4506 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4489 4507 ddi_put8(p->m_acc_hdl,
4490 4508 &sgechain->Flags, chainflags);
4491 4509 /*
4492 4510 * k is the frame counter and (k + 1)
4493 4511 * is the number of the next frame.
4494 4512 * Note that frames are in contiguous
4495 4513 * memory space.
4496 4514 */
4497 4515 nframe_phys_addr = p->m_phys_addr +
4498 4516 (mpt->m_req_frame_size * k);
4499 4517 ddi_put32(p->m_acc_hdl,
4500 4518 &sgechain->Address.Low,
4501 4519 nframe_phys_addr);
4502 4520 ddi_put32(p->m_acc_hdl,
4503 4521 &sgechain->Address.High,
4504 4522 nframe_phys_addr >> 32);
4505 4523
4506 4524 /*
4507 4525 * If there are more than 2 frames left
4508 4526 * we have to next chain offset to
4509 4527 * the location of the chain element
4510 4528 * in the next frame and fill in the
4511 4529 * length of the next chain
4512 4530 */
4513 4531 if ((frames - k) >= 2) {
4514 4532 ddi_put8(p->m_acc_hdl,
4515 4533 &sgechain->NextChainOffset,
4516 4534 (sgemax *
4517 4535 sizeof (MPI2_SGE_SIMPLE64))
4518 4536 >> 2);
4519 4537 ddi_put16(p->m_acc_hdl,
4520 4538 &sgechain->Length,
4521 4539 mpt->m_req_frame_size /
4522 4540 sizeof (MPI2_SGE_SIMPLE64) *
4523 4541 sizeof (MPI2_SGE_SIMPLE64));
4524 4542 } else {
4525 4543 /*
4526 4544 * This is the last frame. Set
4527 4545 * the NextChainOffset to 0 and
4528 4546 * Length is the total size of
4529 4547 * all remaining simple elements
4530 4548 */
4531 4549 ddi_put8(p->m_acc_hdl,
4532 4550 &sgechain->NextChainOffset,
4533 4551 0);
4534 4552 ddi_put16(p->m_acc_hdl,
4535 4553 &sgechain->Length,
4536 4554 (cookiec - j) *
4537 4555 sizeof (MPI2_SGE_SIMPLE64));
4538 4556 }
4539 4557
4540 4558 /* Jump to the next frame */
4541 4559 sge = (pMpi2SGESimple64_t)
4542 4560 ((char *)p->m_frames_addr +
4543 4561 (int)mpt->m_req_frame_size * k);
4544 4562
4545 4563 continue;
4546 4564 }
4547 4565
4548 4566 ddi_put32(p->m_acc_hdl,
4549 4567 &sge->Address.Low,
4550 4568 dmap->addr.address64.Low);
4551 4569 ddi_put32(p->m_acc_hdl,
4552 4570 &sge->Address.High,
4553 4571 dmap->addr.address64.High);
4554 4572 ddi_put32(p->m_acc_hdl,
4555 4573 &sge->FlagsLength, dmap->count);
4556 4574 flags = ddi_get32(p->m_acc_hdl,
4557 4575 &sge->FlagsLength);
4558 4576 flags |= ((uint32_t)(
4559 4577 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4560 4578 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4561 4579 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4562 4580 MPI2_SGE_FLAGS_SHIFT);
4563 4581
4564 4582 /*
4565 4583 * If we are at the end of the frame and
4566 4584 * there is another frame to fill in
4567 4585 * we set the last simple element as last
4568 4586 * element
4569 4587 */
4570 4588 if ((l == sgemax) && (k != frames)) {
4571 4589 flags |= ((uint32_t)
4572 4590 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4573 4591 MPI2_SGE_FLAGS_SHIFT);
4574 4592 }
4575 4593
4576 4594 /*
4577 4595 * If this is the final cookie we
4578 4596 * indicate it by setting the flags
4579 4597 */
4580 4598 if (j == i) {
4581 4599 flags |= ((uint32_t)
4582 4600 (MPI2_SGE_FLAGS_LAST_ELEMENT |
4583 4601 MPI2_SGE_FLAGS_END_OF_BUFFER |
4584 4602 MPI2_SGE_FLAGS_END_OF_LIST) <<
4585 4603 MPI2_SGE_FLAGS_SHIFT);
4586 4604 }
4587 4605 if (cmd->cmd_flags & CFLAG_DMASEND) {
4588 4606 flags |=
4589 4607 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4590 4608 MPI2_SGE_FLAGS_SHIFT);
4591 4609 } else {
4592 4610 flags |=
4593 4611 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4594 4612 MPI2_SGE_FLAGS_SHIFT);
4595 4613 }
4596 4614 ddi_put32(p->m_acc_hdl,
4597 4615 &sge->FlagsLength, flags);
4598 4616 dmap++;
4599 4617 sge++;
4600 4618 }
4601 4619 }
4602 4620
4603 4621 /*
4604 4622 * Sync DMA with the chain buffers that were just created
4605 4623 */
4606 4624 (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4607 4625 }
4608 4626
4609 4627 static void
4610 4628 mptsas_ieee_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4611 4629 ddi_acc_handle_t acc_hdl, uint_t cookiec, uint8_t end_flag)
4612 4630 {
4613 4631 pMpi2IeeeSgeSimple64_t ieeesge;
4614 4632 mptti_t *dmap;
4615 4633 uint8_t flags;
4616 4634
4617 4635 dmap = cmd->cmd_sg;
4618 4636
4619 4637 NDBG1(("mptsas_ieee_sge_mainframe: cookiec=%d, %s", cookiec,
4620 4638 cmd->cmd_flags & CFLAG_DMASEND?"Out":"In"));
4621 4639
4622 4640 ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
4623 4641 while (cookiec--) {
4624 4642 ddi_put32(acc_hdl,
4625 4643 &ieeesge->Address.Low, dmap->addr.address64.Low);
4626 4644 ddi_put32(acc_hdl,
4627 4645 &ieeesge->Address.High, dmap->addr.address64.High);
4628 4646 ddi_put32(acc_hdl, &ieeesge->Length,
4629 4647 dmap->count);
4630 4648 NDBG1(("mptsas_ieee_sge_mainframe: len=%d", dmap->count));
4631 4649 flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
4632 4650 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4633 4651
4634 4652 /*
4635 4653 * If this is the last cookie, we set the flags
4636 4654 * to indicate so
4637 4655 */
4638 4656 if (cookiec == 0) {
4639 4657 flags |= end_flag;
4640 4658 }
4641 4659
4642 4660 ddi_put8(acc_hdl, &ieeesge->Flags, flags);
4643 4661 dmap++;
4644 4662 ieeesge++;
4645 4663 }
4646 4664 }
4647 4665
/*
 * Build a multi-frame IEEE (MPI2.5) scatter/gather list for a command whose
 * cookie count exceeds what fits in the main request frame.  The main frame
 * is filled first, terminated by a chain element that points at the per
 * command extra-frame buffer (cmd->cmd_extra_frames); the remaining cookies
 * are then laid out across those extra frames, each non-final frame ending
 * in another chain element.  Finishes with a DMA sync of the chain buffers.
 */
static void
mptsas_ieee_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
    pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
{
	pMpi2IeeeSgeSimple64_t	ieeesge;
	pMpi25IeeeSgeChain64_t	ieeesgechain;
	uint64_t		nframe_phys_addr;
	uint_t			cookiec;
	mptti_t			*dmap;
	uint8_t			flags;

	/*
	 * Save the number of entries in the DMA
	 * Scatter/Gather list
	 */
	cookiec = cmd->cmd_cookiec;

	NDBG1(("mptsas_ieee_sge_chain: cookiec=%d", cookiec));

	/*
	 * Hereby we start to deal with multiple frames.
	 * The process is as follows:
	 * 1. Determine how many frames are needed for SGL element
	 *    storage; Note that all frames are stored in contiguous
	 *    memory space and in 64-bit DMA mode each element is
	 *    4 double-words (16 bytes) long.
	 * 2. Fill up the main frame. We need to do this separately
	 *    since it contains the SCSI IO request header and needs
	 *    dedicated processing. Note that the last 4 double-words
	 *    of the SCSI IO header is for SGL element storage
	 *    (MPI2_SGE_IO_UNION).
	 * 3. Fill the chain element in the main frame, so the DMA
	 *    engine can use the following frames.
	 * 4. Enter a loop to fill the remaining frames. Note that the
	 *    last frame contains no chain element.  The remaining
	 *    frames go into the mpt SGL buffer allocated on the fly,
	 *    not immediately following the main message frame, as in
	 *    Gen1.
	 * Restrictions:
	 *    For 64-bit DMA, the simple element and chain element
	 *    are both of 4 double-words (16 bytes) in size, even
	 *    though all frames are stored in the first 4G of mem
	 *    range and the higher 32-bits of the address are always 0.
	 */
	int i, j, k, l, frames, sgemax;
	int temp;
	uint8_t chainflags;
	uint32_t chainlength;
	mptsas_cache_frames_t *p;

	/*
	 * Sgemax is the number of SGE's that will fit
	 * each extra frame and frames is total
	 * number of frames we'll need.  1 sge entry per
	 * frame is reserved for the chain element, thus the -1 below.
	 */
	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_IEEE_SGE_SIMPLE64))
	    - 1);
	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;

	/*
	 * A little check to see if we need to round up the number
	 * of frames we need
	 */
	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
	    sgemax) > 1) {
		frames = (temp + 1);
	} else {
		frames = temp;
	}
	NDBG1(("mptsas_ieee_sge_chain: temp=%d, frames=%d", temp, frames));
	dmap = cmd->cmd_sg;
	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);

	/*
	 * First fill in the main frame
	 */
	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
	mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl, j, 0);
	dmap += j;
	ieeesge += j;
	j++;

	/*
	 * Fill in the chain element in the main frame.
	 * About calculation on ChainOffset:
	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
	 *    in the end reserved for SGL element storage
	 *    (MPI2_SGE_IO_UNION); we should count it in our
	 *    calculation.  See its definition in the header file.
	 * 2. Constant j is the counter of the current SGL element
	 *    that will be processed, and (j - 1) is the number of
	 *    SGL elements that have been processed (stored in the
	 *    main frame).
	 * 3. ChainOffset value should be in units of quad-words (16
	 *    bytes) so the last value should be divided by 16.
	 */
	ddi_put8(acc_hdl, &frame->ChainOffset,
	    (sizeof (MPI2_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION) +
	    (j - 1) * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
	chainflags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
	ddi_put8(acc_hdl, &ieeesgechain->Flags, chainflags);

	/*
	 * The size of the next frame is the accurate size of space
	 * (in bytes) used to store the SGL elements. j is the counter
	 * of SGL elements. (j - 1) is the number of SGL elements that
	 * have been processed (stored in frames).
	 */
	if (frames >= 2) {
		ASSERT(mpt->m_req_frame_size >=
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
		chainlength = mpt->m_req_frame_size /
		    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64);
	} else {
		chainlength = ((cookiec - (j - 1)) *
		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
	}

	p = cmd->cmd_extra_frames;

	ddi_put32(acc_hdl, &ieeesgechain->Length, chainlength);
	ddi_put32(acc_hdl, &ieeesgechain->Address.Low, p->m_phys_addr);
	ddi_put32(acc_hdl, &ieeesgechain->Address.High, p->m_phys_addr >> 32);

	/*
	 * If there are more than 2 frames left we have to
	 * fill in the next chain offset to the location of
	 * the chain element in the next frame.
	 * sgemax is the number of simple elements in an extra
	 * frame.  Note that the value NextChainOffset should be
	 * in double-words (4 bytes).
	 */
	if (frames >= 2) {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset,
		    (sgemax * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
	} else {
		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset, 0);
	}

	/*
	 * Jump to next frame;
	 * Starting here, chain buffers go into the per command SGL.
	 * This buffer is allocated when chain buffers are needed.
	 */
	ieeesge = (pMpi2IeeeSgeSimple64_t)p->m_frames_addr;
	i = cookiec;

	/*
	 * Start filling in frames with SGE's.  If we
	 * reach the end of frame and still have SGE's
	 * to fill we need to add a chain element and
	 * use another frame.  j will be our counter
	 * for what cookie we are at and i will be
	 * the total cookiec. k is the current frame
	 */
	for (k = 1; k <= frames; k++) {
		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {

			/*
			 * If we have reached the end of frame
			 * and we have more SGE's to fill in
			 * we have to fill the final entry
			 * with a chain element and then
			 * continue to the next frame
			 */
			if ((l == (sgemax + 1)) && (k != frames)) {
				ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
				/*
				 * Back up j: this slot holds a chain
				 * element, not the current cookie, so the
				 * cookie is re-processed in the next frame.
				 */
				j--;
				chainflags =
				    MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
				    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
				ddi_put8(p->m_acc_hdl,
				    &ieeesgechain->Flags, chainflags);
				/*
				 * k is the frame counter and (k + 1)
				 * is the number of the next frame.
				 * Note that frames are in contiguous
				 * memory space.
				 */
				nframe_phys_addr = p->m_phys_addr +
				    (mpt->m_req_frame_size * k);
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.Low,
				    nframe_phys_addr);
				ddi_put32(p->m_acc_hdl,
				    &ieeesgechain->Address.High,
				    nframe_phys_addr >> 32);

				/*
				 * If there are more than 2 frames left
				 * we have to set the next chain offset to
				 * the location of the chain element
				 * in the next frame and fill in the
				 * length of the next chain
				 */
				if ((frames - k) >= 2) {
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    (sgemax *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64))
					    >> 4);
					ASSERT(mpt->m_req_frame_size >=
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    mpt->m_req_frame_size /
					    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				} else {
					/*
					 * This is the last frame. Set
					 * the NextChainOffset to 0 and
					 * Length is the total size of
					 * all remaining simple elements
					 */
					ddi_put8(p->m_acc_hdl,
					    &ieeesgechain->NextChainOffset,
					    0);
					ddi_put32(p->m_acc_hdl,
					    &ieeesgechain->Length,
					    (cookiec - j) *
					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
				}

				/* Jump to the next frame */
				ieeesge = (pMpi2IeeeSgeSimple64_t)
				    ((char *)p->m_frames_addr +
				    (int)mpt->m_req_frame_size * k);

				continue;
			}

			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.Low,
			    dmap->addr.address64.Low);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Address.High,
			    dmap->addr.address64.High);
			ddi_put32(p->m_acc_hdl,
			    &ieeesge->Length, dmap->count);
			flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);

			/*
			 * If we are at the end of the frame and
			 * there is another frame to fill in
			 * do we need to do anything?
			 * if ((l == sgemax) && (k != frames)) {
			 * }
			 */

			/*
			 * If this is the final cookie set end of list.
			 */
			if (j == i) {
				flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
			}

			ddi_put8(p->m_acc_hdl, &ieeesge->Flags, flags);
			dmap++;
			ieeesge++;
		}
	}

	/*
	 * Sync DMA with the chain buffers that were just created
	 */
	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
}
4922 4940
4923 4941 static void
4924 4942 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4925 4943 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4926 4944 {
4927 4945 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4928 4946
4929 4947 NDBG1(("mptsas_sge_setup: cookiec=%d", cmd->cmd_cookiec));
4930 4948
4931 4949 /*
4932 4950 * Set read/write bit in control.
4933 4951 */
4934 4952 if (cmd->cmd_flags & CFLAG_DMASEND) {
4935 4953 *control |= MPI2_SCSIIO_CONTROL_WRITE;
4936 4954 } else {
4937 4955 *control |= MPI2_SCSIIO_CONTROL_READ;
4938 4956 }
4939 4957
4940 4958 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4941 4959
4942 4960 /*
4943 4961 * We have 4 cases here. First where we can fit all the
4944 4962 * SG elements into the main frame, and the case
4945 4963 * where we can't. The SG element is also different when using
4946 4964 * MPI2.5 interface.
4947 4965 * If we have more cookies than we can attach to a frame
4948 4966 * we will need to use a chain element to point
4949 4967 * a location of memory where the rest of the S/G
4950 4968 * elements reside.
4951 4969 */
4952 4970 if (cmd->cmd_cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4953 4971 if (mpt->m_MPI25) {
4954 4972 mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl,
4955 4973 cmd->cmd_cookiec,
4956 4974 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
4957 4975 } else {
4958 4976 mptsas_sge_mainframe(cmd, frame, acc_hdl,
4959 4977 cmd->cmd_cookiec,
4960 4978 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4961 4979 | MPI2_SGE_FLAGS_END_OF_BUFFER
4962 4980 | MPI2_SGE_FLAGS_END_OF_LIST) <<
4963 4981 MPI2_SGE_FLAGS_SHIFT));
4964 4982 }
4965 4983 } else {
4966 4984 if (mpt->m_MPI25) {
4967 4985 mptsas_ieee_sge_chain(mpt, cmd, frame, acc_hdl);
4968 4986 } else {
4969 4987 mptsas_sge_chain(mpt, cmd, frame, acc_hdl);
4970 4988 }
4971 4989 }
4972 4990 }
4973 4991
4974 4992 /*
4975 4993 * Interrupt handling
4976 4994 * Utility routine. Poll for status of a command sent to HBA
4977 4995 * without interrupts (a FLAG_NOINTR command).
4978 4996 */
4979 4997 int
4980 4998 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
4981 4999 {
4982 5000 int rval = TRUE;
4983 5001
4984 5002 NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
4985 5003
4986 5004 if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
4987 5005 mptsas_restart_hba(mpt);
4988 5006 }
4989 5007
4990 5008 /*
4991 5009 * Wait, using drv_usecwait(), long enough for the command to
4992 5010 * reasonably return from the target if the target isn't
4993 5011 * "dead". A polled command may well be sent from scsi_poll, and
4994 5012 * there are retries built in to scsi_poll if the transport
4995 5013 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
4996 5014 * and retries the transport up to scsi_poll_busycnt times
4997 5015 * (currently 60) if
4998 5016 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
4999 5017 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
5000 5018 *
5001 5019 * limit the waiting to avoid a hang in the event that the
5002 5020 * cmd never gets started but we are still receiving interrupts
5003 5021 */
5004 5022 while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
5005 5023 if (mptsas_wait_intr(mpt, polltime) == FALSE) {
5006 5024 NDBG5(("mptsas_poll: command incomplete"));
5007 5025 rval = FALSE;
5008 5026 break;
5009 5027 }
5010 5028 }
5011 5029
5012 5030 if (rval == FALSE) {
5013 5031
5014 5032 /*
5015 5033 * this isn't supposed to happen, the hba must be wedged
5016 5034 * Mark this cmd as a timeout.
5017 5035 */
5018 5036 mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
5019 5037 (STAT_TIMEOUT|STAT_ABORTED));
5020 5038
5021 5039 if (poll_cmd->cmd_queued == FALSE) {
5022 5040
5023 5041 NDBG5(("mptsas_poll: not on waitq"));
5024 5042
5025 5043 poll_cmd->cmd_pkt->pkt_state |=
5026 5044 (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
5027 5045 } else {
5028 5046
5029 5047 /* find and remove it from the waitq */
5030 5048 NDBG5(("mptsas_poll: delete from waitq"));
5031 5049 mptsas_waitq_delete(mpt, poll_cmd);
5032 5050 }
5033 5051
5034 5052 }
5035 5053 mptsas_fma_check(mpt, poll_cmd);
5036 5054 NDBG5(("mptsas_poll: done"));
5037 5055 return (rval);
5038 5056 }
5039 5057
5040 5058 /*
5041 5059 * Used for polling cmds and TM function
5042 5060 */
5043 5061 static int
5044 5062 mptsas_wait_intr(mptsas_t *mpt, int polltime)
5045 5063 {
5046 5064 int cnt;
5047 5065 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5048 5066 uint32_t int_mask;
5049 5067
5050 5068 NDBG5(("mptsas_wait_intr"));
5051 5069
5052 5070 mpt->m_polled_intr = 1;
5053 5071
5054 5072 /*
5055 5073 * Get the current interrupt mask and disable interrupts. When
5056 5074 * re-enabling ints, set mask to saved value.
5057 5075 */
5058 5076 int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
5059 5077 MPTSAS_DISABLE_INTR(mpt);
5060 5078
5061 5079 /*
5062 5080 * Keep polling for at least (polltime * 1000) seconds
5063 5081 */
5064 5082 for (cnt = 0; cnt < polltime; cnt++) {
5065 5083 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5066 5084 DDI_DMA_SYNC_FORCPU);
5067 5085
5068 5086 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5069 5087 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5070 5088
5071 5089 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5072 5090 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5073 5091 ddi_get32(mpt->m_acc_post_queue_hdl,
5074 5092 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5075 5093 drv_usecwait(1000);
5076 5094 continue;
5077 5095 }
5078 5096
5079 5097 /*
5080 5098 * The reply is valid, process it according to its
5081 5099 * type.
5082 5100 */
5083 5101 mptsas_process_intr(mpt, reply_desc_union);
5084 5102
5085 5103 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5086 5104 mpt->m_post_index = 0;
5087 5105 }
5088 5106
5089 5107 /*
5090 5108 * Update the global reply index
5091 5109 */
5092 5110 ddi_put32(mpt->m_datap,
5093 5111 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5094 5112 mpt->m_polled_intr = 0;
5095 5113
5096 5114 /*
5097 5115 * Re-enable interrupts and quit.
5098 5116 */
5099 5117 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
5100 5118 int_mask);
5101 5119 return (TRUE);
5102 5120
5103 5121 }
5104 5122
5105 5123 /*
5106 5124 * Clear polling flag, re-enable interrupts and quit.
5107 5125 */
5108 5126 mpt->m_polled_intr = 0;
5109 5127 ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
5110 5128 return (FALSE);
5111 5129 }
5112 5130
/*
 * Complete a command whose SCSI IO finished successfully (a "success"
 * reply descriptor carries no reply frame).  Validates the SMID, marks
 * the packet fully transported, and either wakes a passthru waiter or
 * removes the command from its slot and queues it on the doneq.
 */
static void
mptsas_handle_scsi_io_success(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2SCSIIOSuccessReplyDescriptor_t	scsi_io_success;
	uint16_t				SMID;
	mptsas_slots_t				*slots = mpt->m_active;
	mptsas_cmd_t				*cmd = NULL;
	struct scsi_pkt				*pkt;

	ASSERT(mutex_owned(&mpt->m_mutex));

	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);

	/*
	 * This is a success reply so just complete the IO.  First, do a sanity
	 * check on the SMID.  The final slot is used for TM requests, which
	 * would not come into this reply handler.
	 */
	if ((SMID == 0) || (SMID > slots->m_n_normal)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
		    SMID);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	cmd = slots->m_slot[SMID];

	/*
	 * print warning and return if the slot is empty
	 */
	if (cmd == NULL) {
		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
		    "in slot %d", SMID);
		return;
	}

	pkt = CMD2PKT(cmd);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
	    STATE_GOT_STATUS);
	if (cmd->cmd_flags & CFLAG_DMAVALID) {
		pkt->pkt_state |= STATE_XFERRED_DATA;
	}
	pkt->pkt_resid = 0;

	/* Passthru commands are awaited via cv, not the doneq. */
	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_broadcast(&mpt->m_passthru_cv);
		return;
	} else {
		mptsas_remove_cmd(mpt, cmd);
	}

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5182 5200
/*
 * Handle an address reply descriptor: validate and locate the associated
 * reply frame, dispatch on the IOC function it carries (SCSI IO error,
 * task management, FW download, event notification, diag buffer post),
 * then return the reply frame to the free queue and complete the command.
 * Event and diag-buffer replies manage the frame themselves and return
 * early.  Called with m_mutex held.
 */
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2AddressReplyDescriptor_t	address_reply;
	pMPI2DefaultReply_t		reply;
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint32_t			reply_addr, reply_frame_dma_baseaddr;
	uint16_t			SMID, iocstatus;
	mptsas_slots_t			*slots = mpt->m_active;
	mptsas_cmd_t			*cmd = NULL;
	uint8_t				function, buffer_type;
	m_replyh_arg_t			*args;
	int				reply_frame_no;

	ASSERT(mutex_owned(&mpt->m_mutex));

	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
	    &address_reply->ReplyFrameAddress);
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);

	/*
	 * If reply frame is not in the proper range we should ignore this
	 * message and exit the interrupt handler.
	 */
	reply_frame_dma_baseaddr = mpt->m_reply_frame_dma_addr & 0xffffffffu;
	if ((reply_addr < reply_frame_dma_baseaddr) ||
	    (reply_addr >= (reply_frame_dma_baseaddr +
	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
	    ((reply_addr - reply_frame_dma_baseaddr) %
	    mpt->m_reply_frame_size != 0)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
		    "address 0x%x\n", reply_addr);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
	    reply_frame_dma_baseaddr));
	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

	NDBG31(("mptsas_handle_address_reply: function 0x%x, reply_addr=0x%x",
	    function, reply_addr));

	/*
	 * don't get slot information and command for events since these values
	 * don't exist
	 */
	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
		/*
		 * This could be a TM reply, which use the last allocated SMID,
		 * so allow for that.
		 */
		if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
			    "%d\n", SMID);
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			return;
		}

		cmd = slots->m_slot[SMID];

		/*
		 * print warning and return if the slot is empty
		 */
		if (cmd == NULL) {
			mptsas_log(mpt, CE_WARN, "?NULL command for address "
			    "reply in slot %d", SMID);
			return;
		}
		/*
		 * Passthru/config/fw-diag commands are completed by waking
		 * their waiters; the waiter owns the reply frame (cmd_rfm)
		 * and will return it to the free queue itself.
		 */
		if ((cmd->cmd_flags &
		    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
			return;
		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
			mptsas_remove_cmd(mpt, cmd);
		}
		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
	}
	/*
	 * Depending on the function, we need to handle
	 * the reply frame (and cmd) differently.
	 */
	switch (function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		cmd->cmd_rfm = reply_addr;
		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
		    cmd);
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_signal(&mpt->m_fw_cv);
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		reply_frame_no = (reply_addr - reply_frame_dma_baseaddr) /
		    mpt->m_reply_frame_size;
		args = &mpt->m_replyh_args[reply_frame_no];
		args->mpt = (void *)mpt;
		args->rfm = reply_addr;

		/*
		 * Record the event if its type is enabled in
		 * this mpt instance by ioctl.
		 */
		mptsas_record_event(args);

		/*
		 * Handle time critical events
		 * NOT_RESPONDING/ADDED only now
		 */
		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
			/*
			 * Would not return main process,
			 * just let taskq resolve ack action
			 * and ack would be sent in taskq thread
			 */
			NDBG20(("send mptsas_handle_event_sync success"));
		}

		if (mpt->m_in_reset) {
			NDBG20(("dropping event received during reset"));
			return;
		}

		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "No memory available"
			    "for dispatch taskq");
			/*
			 * Return the reply frame to the free queue.
			 */
			ddi_put32(mpt->m_acc_free_queue_hdl,
			    &((uint32_t *)(void *)
			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
				mpt->m_free_index = 0;
			}

			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
		}
		return;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		/*
		 * If SMID is 0, this implies that the reply is due to a
		 * release function with a status that the buffer has been
		 * released.  Set the buffer flags accordingly.
		 */
		if (SMID == 0) {
			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &reply->IOCStatus);
			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
				pBuffer =
				    &mpt->m_fw_diag_buffer_list[buffer_type];
				pBuffer->valid_data = TRUE;
				pBuffer->owned_by_firmware = FALSE;
				pBuffer->immediate = FALSE;
			}
		} else {
			/*
			 * Normal handling of diag post reply with SMID.
			 */
			cmd = slots->m_slot[SMID];

			/*
			 * print warning and return if the slot is empty
			 */
			if (cmd == NULL) {
				mptsas_log(mpt, CE_WARN, "?NULL command for "
				    "address reply in slot %d", SMID);
				return;
			}
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_fw_diag_cv);
		}
		return;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
		break;
	}

	/*
	 * Return the reply frame to the free queue.
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
	    reply_addr);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);

	/* FW commands are completed by their issuer, not the doneq. */
	if (cmd->cmd_flags & CFLAG_FW_CMD)
		return;

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5413 5431
#ifdef MPTSAS_DEBUG
/*
 * Debug-only scratch buffer; presumably holds the most recently received
 * SCSI sense data for post-mortem inspection (e.g. via mdb) — the writer
 * is outside this view, likely mptsas_check_scsi_io_error(); confirm there.
 */
static uint8_t mptsas_last_sense[256];
#endif
5417 5435
/*
 * Decode the SCSI IO reply frame for a completed command and translate the
 * IOCStatus/SCSIStatus/SCSIState combination into pkt_reason, pkt_state and
 * (for check conditions) a synthesized ARQ status on the command's scsi_pkt.
 * Also handles device-missing-delay retries, QFULL, and triggers dynamic
 * reconfiguration when the sense data indicates a LUN-level change.
 */
static void
mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
    mptsas_cmd_t *cmd)
{
	uint8_t scsi_status, scsi_state;
	uint16_t ioc_status, cmd_rqs_len;
	uint32_t xferred, sensecount, responsedata, loginfo = 0;
	struct scsi_pkt *pkt;
	struct scsi_arq_status *arqstat;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
	uint8_t *sensedata = NULL;
	uint64_t sas_wwn;
	uint8_t phy;
	char wwn_str[MPTSAS_WWN_STRLEN];

	/* Pull the fields we need out of the DMA-mapped reply frame. */
	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
	    &reply->ResponseInfo);

	/*
	 * If the IOC attached log info, report it along with the target's
	 * WWN (or PHY number for a target without a WWN).
	 */
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		sas_wwn = ptgt->m_addr.mta_wwn;
		phy = ptgt->m_phynum;
		if (sas_wwn == 0) {
			(void) sprintf(wwn_str, "p%x", phy);
		} else {
			(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
		}
		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		mptsas_log(mpt, CE_NOTE,
		    "?Log info 0x%x received for target %d %s.\n"
		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
		    loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
		    scsi_state);
	}

	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
	    scsi_status, ioc_status, scsi_state));

	pkt = CMD2PKT(cmd);
	*(pkt->pkt_scbp) = scsi_status;

	if (loginfo == 0x31170000) {
		/*
		 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
		 * 0x31170000 comes, that means the device missing delay
		 * is in progressing, the command need retry later.
		 */
		*(pkt->pkt_scbp) = STATUS_BUSY;
		return;
	}

	/*
	 * No SCSI status because the device is gone: mark the command
	 * incomplete and start draining the target's outstanding I/O.
	 */
	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state |= STATE_GOT_BUS;
		if (ptgt->m_reset_delay == 0) {
			mptsas_set_throttle(mpt, ptgt,
			    DRAIN_THROTTLE);
		}
		return;
	}

	/*
	 * The low byte of the response info carries the SCSI response
	 * code; the TLR_OFF code means the target rejected Transport
	 * Layer Retries.
	 */
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		responsedata &= 0x000000FF;
		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
			mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
			pkt->pkt_reason = CMD_TLR_OFF;
			return;
		}
	}


	switch (scsi_status) {
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		/*
		 * Build an ARQ status in the packet's status block and
		 * copy the autosense data the IOC already fetched for us.
		 */
		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
		arqstat = (void*)(pkt->pkt_scbp);
		arqstat->sts_rqpkt_status = *((struct scsi_status *)
		    (pkt->pkt_scbp));
		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
		if (cmd->cmd_flags & CFLAG_XARQ) {
			pkt->pkt_state |= STATE_XARQ_DONE;
		}
		if (pkt->pkt_resid != cmd->cmd_dmacount) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
		arqstat->sts_rqpkt_state = pkt->pkt_state;
		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
		sensedata = (uint8_t *)&arqstat->sts_sensedata;
		cmd_rqs_len = cmd->cmd_extrqslen ?
		    cmd->cmd_extrqslen : cmd->cmd_rqslen;
		(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
		    DDI_DMA_SYNC_FORKERNEL);
#ifdef MPTSAS_DEBUG
		bcopy(cmd->cmd_arq_buf, mptsas_last_sense,
		    ((cmd_rqs_len >= sizeof (mptsas_last_sense)) ?
		    sizeof (mptsas_last_sense):cmd_rqs_len));
#endif
		/* Copy at most sensecount bytes, bounded by the RQS buffer. */
		bcopy((uchar_t *)cmd->cmd_arq_buf, sensedata,
		    ((cmd_rqs_len >= sensecount) ? sensecount :
		    cmd_rqs_len));
		arqstat->sts_rqpkt_resid = (cmd_rqs_len - sensecount);
		cmd->cmd_flags |= CFLAG_CMDARQ;
		/*
		 * Set proper status for pkt if autosense was valid
		 */
		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
			struct scsi_status zero_status = { 0 };
			arqstat->sts_rqpkt_status = zero_status;
		}

		/*
		 * ASC=0x47 is parity error
		 * ASC=0x48 is initiator detected error received
		 */
		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
		    ((scsi_sense_asc(sensedata) == 0x47) ||
		    (scsi_sense_asc(sensedata) == 0x48))) {
			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
		}

		/*
		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
		 * ASC/ASCQ=0x25/0x00 means invalid lun
		 */
		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
		    (scsi_sense_asc(sensedata) == 0x3F) &&
		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
		    (scsi_sense_asc(sensedata) == 0x25) &&
		    (scsi_sense_ascq(sensedata) == 0x00))) {
			mptsas_topo_change_list_t *topo_node = NULL;

			/*
			 * LUN configuration changed underneath us; queue a
			 * dynamic-reconfiguration task for this target.
			 */
			topo_node = kmem_zalloc(
			    sizeof (mptsas_topo_change_list_t),
			    KM_NOSLEEP);
			if (topo_node == NULL) {
				mptsas_log(mpt, CE_NOTE, "No memory"
				    "resource for handle SAS dynamic"
				    "reconfigure.\n");
				break;
			}
			topo_node->mpt = mpt;
			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
			topo_node->un.phymask = ptgt->m_addr.mta_phymask;
			topo_node->devhdl = ptgt->m_devhdl;
			topo_node->object = (void *)ptgt;
			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;

			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
			    mptsas_handle_dr,
			    (void *)topo_node,
			    DDI_NOSLEEP)) != DDI_SUCCESS) {
				kmem_free(topo_node,
				    sizeof (mptsas_topo_change_list_t));
				mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
				    "for handle SAS dynamic reconfigure"
				    "failed. \n");
			}
		}
		break;
	case MPI2_SCSI_STATUS_GOOD:
		/* GOOD SCSI status; fan out on the IOC's own status. */
		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_state |= STATE_GOT_BUS;
			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}
			NDBG31(("lost disk for target%d, command:%x",
			    Tgt(cmd), pkt->pkt_cdbp[0]));
			break;
		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
			NDBG31(("data overrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_reason = CMD_DATA_OVR;
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS
			    | STATE_XFERRED_DATA);
			pkt->pkt_resid = 0;
			break;
		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
			NDBG31(("data underrun: xferred=%d", xferred));
			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
			    | STATE_SENT_CMD | STATE_GOT_STATUS);
			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
			if (pkt->pkt_resid != cmd->cmd_dmacount) {
				pkt->pkt_state |= STATE_XFERRED_DATA;
			}
			break;
		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
			if (cmd->cmd_active_expiration <= gethrtime()) {
				/*
				 * When timeout requested, propagate
				 * proper reason and statistics to
				 * target drivers.
				 */
				mptsas_set_pkt_reason(mpt, cmd, CMD_TIMEOUT,
				    STAT_BUS_RESET | STAT_TIMEOUT);
			} else {
				mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
				    STAT_BUS_RESET);
			}
			break;
		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_RESET, STAT_DEV_RESET);
			break;
		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
			mptsas_set_pkt_reason(mpt,
			    cmd, CMD_TERMINATED, STAT_TERMINATED);
			break;
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		case MPI2_IOCSTATUS_BUSY:
			/*
			 * set throttles to drain
			 */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/*
			 * retry command
			 */
			cmd->cmd_flags |= CFLAG_RETRY;
			cmd->cmd_pkt_flags |= FLAG_HEAD;

			(void) mptsas_accept_pkt(mpt, cmd);
			break;
		default:
			mptsas_log(mpt, CE_WARN,
			    "unknown ioc_status = %x\n", ioc_status);
			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
			    "count = %x, scsi_status = %x", scsi_state,
			    xferred, scsi_status);
			break;
		}
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		mptsas_handle_qfull(mpt, cmd);
		break;
	case MPI2_SCSI_STATUS_BUSY:
		NDBG31(("scsi_status busy received"));
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		NDBG31(("scsi_status reservation conflict received"));
		break;
	default:
		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
		    scsi_status, ioc_status);
		mptsas_log(mpt, CE_WARN,
		    "mptsas_process_intr: invalid scsi status\n");
		break;
	}
}
5687 5705
5688 5706 static void
5689 5707 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5690 5708 mptsas_cmd_t *cmd)
5691 5709 {
5692 5710 uint8_t task_type;
5693 5711 uint16_t ioc_status;
5694 5712 uint32_t log_info;
5695 5713 uint16_t dev_handle;
5696 5714 struct scsi_pkt *pkt = CMD2PKT(cmd);
5697 5715
5698 5716 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5699 5717 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5700 5718 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5701 5719 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5702 5720
5703 5721 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5704 5722 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5705 5723 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5706 5724 task_type, ioc_status, log_info, dev_handle);
5707 5725 pkt->pkt_reason = CMD_INCOMPLETE;
5708 5726 return;
5709 5727 }
5710 5728
5711 5729 switch (task_type) {
5712 5730 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5713 5731 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5714 5732 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5715 5733 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5716 5734 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5717 5735 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5718 5736 break;
5719 5737 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5720 5738 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5721 5739 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5722 5740 /*
5723 5741 * Check for invalid DevHandle of 0 in case application
5724 5742 * sends bad command. DevHandle of 0 could cause problems.
5725 5743 */
5726 5744 if (dev_handle == 0) {
5727 5745 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5728 5746 " DevHandle of 0.");
5729 5747 } else {
5730 5748 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5731 5749 task_type);
5732 5750 }
5733 5751 break;
5734 5752 default:
5735 5753 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5736 5754 task_type);
5737 5755 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5738 5756 break;
5739 5757 }
5740 5758 }
5741 5759
/*
 * Helper-thread body: drains one per-thread done queue, calling packet
 * completion with the queue mutex dropped, until the ACTIVE flag is
 * cleared.  On exit it decrements the thread count and wakes any waiter.
 */
static void
mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
{
	mptsas_t *mpt = arg->mpt;
	uint64_t t = arg->t;
	mptsas_cmd_t *cmd;
	struct scsi_pkt *pkt;
	mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];

	mutex_enter(&item->mutex);
	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
		/* Sleep until work arrives on this thread's done queue. */
		if (!item->doneq) {
			cv_wait(&item->cv, &item->mutex);
		}
		pkt = NULL;
		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
		}
		/*
		 * The queue mutex is released while the completion routine
		 * runs so the target driver's callback executes without
		 * holding this thread's queue lock.
		 */
		mutex_exit(&item->mutex);
		if (pkt) {
			mptsas_pkt_comp(pkt, cmd);
		}
		mutex_enter(&item->mutex);
	}
	mutex_exit(&item->mutex);
	/* Announce our exit so the teardown path can account for it. */
	mutex_enter(&mpt->m_doneq_mutex);
	mpt->m_doneq_thread_n--;
	cv_broadcast(&mpt->m_doneq_thread_cv);
	mutex_exit(&mpt->m_doneq_mutex);
}
5773 5791
5774 5792
5775 5793 /*
5776 5794 * mpt interrupt handler.
5777 5795 */
5778 5796 static uint_t
5779 5797 mptsas_intr(caddr_t arg1, caddr_t arg2)
5780 5798 {
5781 5799 mptsas_t *mpt = (void *)arg1;
5782 5800 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5783 5801 uchar_t did_reply = FALSE;
5784 5802
5785 5803 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5786 5804
5787 5805 mutex_enter(&mpt->m_mutex);
5788 5806
5789 5807 /*
5790 5808 * If interrupts are shared by two channels then check whether this
5791 5809 * interrupt is genuinely for this channel by making sure first the
5792 5810 * chip is in high power state.
5793 5811 */
5794 5812 if ((mpt->m_options & MPTSAS_OPT_PM) &&
5795 5813 (mpt->m_power_level != PM_LEVEL_D0)) {
5796 5814 mutex_exit(&mpt->m_mutex);
5797 5815 return (DDI_INTR_UNCLAIMED);
5798 5816 }
5799 5817
5800 5818 /*
5801 5819 * If polling, interrupt was triggered by some shared interrupt because
5802 5820 * IOC interrupts are disabled during polling, so polling routine will
5803 5821 * handle any replies. Considering this, if polling is happening,
5804 5822 * return with interrupt unclaimed.
5805 5823 */
5806 5824 if (mpt->m_polled_intr) {
5807 5825 mutex_exit(&mpt->m_mutex);
5808 5826 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5809 5827 return (DDI_INTR_UNCLAIMED);
5810 5828 }
5811 5829
5812 5830 /*
5813 5831 * Read the istat register.
5814 5832 */
5815 5833 if ((INTPENDING(mpt)) != 0) {
5816 5834 /*
5817 5835 * read fifo until empty.
5818 5836 */
5819 5837 #ifndef __lock_lint
5820 5838 _NOTE(CONSTCOND)
5821 5839 #endif
5822 5840 while (TRUE) {
5823 5841 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5824 5842 DDI_DMA_SYNC_FORCPU);
5825 5843 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5826 5844 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5827 5845
5828 5846 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5829 5847 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5830 5848 ddi_get32(mpt->m_acc_post_queue_hdl,
5831 5849 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5832 5850 break;
5833 5851 }
5834 5852
5835 5853 /*
5836 5854 * The reply is valid, process it according to its
5837 5855 * type. Also, set a flag for updating the reply index
5838 5856 * after they've all been processed.
5839 5857 */
5840 5858 did_reply = TRUE;
5841 5859
5842 5860 mptsas_process_intr(mpt, reply_desc_union);
5843 5861
5844 5862 /*
5845 5863 * Increment post index and roll over if needed.
5846 5864 */
5847 5865 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5848 5866 mpt->m_post_index = 0;
5849 5867 }
5850 5868 }
5851 5869
5852 5870 /*
5853 5871 * Update the global reply index if at least one reply was
5854 5872 * processed.
5855 5873 */
5856 5874 if (did_reply) {
5857 5875 ddi_put32(mpt->m_datap,
5858 5876 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5859 5877 }
5860 5878 } else {
5861 5879 mutex_exit(&mpt->m_mutex);
5862 5880 return (DDI_INTR_UNCLAIMED);
5863 5881 }
5864 5882 NDBG1(("mptsas_intr complete"));
5865 5883
5866 5884 /*
5867 5885 * If no helper threads are created, process the doneq in ISR. If
5868 5886 * helpers are created, use the doneq length as a metric to measure the
5869 5887 * load on the interrupt CPU. If it is long enough, which indicates the
5870 5888 * load is heavy, then we deliver the IO completions to the helpers.
5871 5889 * This measurement has some limitations, although it is simple and
5872 5890 * straightforward and works well for most of the cases at present.
5873 5891 */
5874 5892 if (!mpt->m_doneq_thread_n ||
5875 5893 (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5876 5894 mptsas_doneq_empty(mpt);
5877 5895 } else {
5878 5896 mptsas_deliver_doneq_thread(mpt);
5879 5897 }
5880 5898
5881 5899 /*
5882 5900 * If there are queued cmd, start them now.
5883 5901 */
5884 5902 if (mpt->m_waitq != NULL) {
5885 5903 mptsas_restart_waitq(mpt);
5886 5904 }
5887 5905
5888 5906 mutex_exit(&mpt->m_mutex);
5889 5907 return (DDI_INTR_CLAIMED);
5890 5908 }
5891 5909
5892 5910 static void
5893 5911 mptsas_process_intr(mptsas_t *mpt,
5894 5912 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5895 5913 {
5896 5914 uint8_t reply_type;
5897 5915
5898 5916 ASSERT(mutex_owned(&mpt->m_mutex));
5899 5917
5900 5918 /*
5901 5919 * The reply is valid, process it according to its
5902 5920 * type. Also, set a flag for updated the reply index
5903 5921 * after they've all been processed.
5904 5922 */
5905 5923 reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5906 5924 &reply_desc_union->Default.ReplyFlags);
5907 5925 reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5908 5926 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
5909 5927 reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
5910 5928 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5911 5929 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5912 5930 mptsas_handle_address_reply(mpt, reply_desc_union);
5913 5931 } else {
5914 5932 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5915 5933 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5916 5934 }
5917 5935
5918 5936 /*
5919 5937 * Clear the reply descriptor for re-use and increment
5920 5938 * index.
5921 5939 */
5922 5940 ddi_put64(mpt->m_acc_post_queue_hdl,
5923 5941 &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
5924 5942 0xFFFFFFFFFFFFFFFF);
5925 5943 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5926 5944 DDI_DMA_SYNC_FORDEV);
5927 5945 }
5928 5946
5929 5947 /*
5930 5948 * handle qfull condition
5931 5949 */
5932 5950 static void
5933 5951 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5934 5952 {
5935 5953 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5936 5954
5937 5955 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
5938 5956 (ptgt->m_qfull_retries == 0)) {
5939 5957 /*
5940 5958 * We have exhausted the retries on QFULL, or,
5941 5959 * the target driver has indicated that it
5942 5960 * wants to handle QFULL itself by setting
5943 5961 * qfull-retries capability to 0. In either case
5944 5962 * we want the target driver's QFULL handling
5945 5963 * to kick in. We do this by having pkt_reason
5946 5964 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5947 5965 */
5948 5966 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5949 5967 } else {
5950 5968 if (ptgt->m_reset_delay == 0) {
5951 5969 ptgt->m_t_throttle =
5952 5970 max((ptgt->m_t_ncmds - 2), 0);
5953 5971 }
5954 5972
5955 5973 cmd->cmd_pkt_flags |= FLAG_HEAD;
5956 5974 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5957 5975 cmd->cmd_flags |= CFLAG_RETRY;
5958 5976
5959 5977 (void) mptsas_accept_pkt(mpt, cmd);
5960 5978
5961 5979 /*
5962 5980 * when target gives queue full status with no commands
5963 5981 * outstanding (m_t_ncmds == 0), throttle is set to 0
5964 5982 * (HOLD_THROTTLE), and the queue full handling start
5965 5983 * (see psarc/1994/313); if there are commands outstanding,
5966 5984 * throttle is set to (m_t_ncmds - 2)
5967 5985 */
5968 5986 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5969 5987 /*
5970 5988 * By setting throttle to QFULL_THROTTLE, we
5971 5989 * avoid submitting new commands and in
5972 5990 * mptsas_restart_cmd find out slots which need
5973 5991 * their throttles to be cleared.
5974 5992 */
5975 5993 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5976 5994 if (mpt->m_restart_cmd_timeid == 0) {
5977 5995 mpt->m_restart_cmd_timeid =
5978 5996 timeout(mptsas_restart_cmd, mpt,
5979 5997 ptgt->m_qfull_retry_interval);
5980 5998 }
5981 5999 }
5982 6000 }
5983 6001 }
5984 6002
5985 6003 mptsas_phymask_t
5986 6004 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5987 6005 {
5988 6006 mptsas_phymask_t phy_mask = 0;
5989 6007 uint8_t i = 0;
5990 6008
5991 6009 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5992 6010
5993 6011 ASSERT(mutex_owned(&mpt->m_mutex));
5994 6012
5995 6013 /*
5996 6014 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5997 6015 */
5998 6016 if (physport == 0xFF) {
5999 6017 return (0);
6000 6018 }
6001 6019
6002 6020 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
6003 6021 if (mpt->m_phy_info[i].attached_devhdl &&
6004 6022 (mpt->m_phy_info[i].phy_mask != 0) &&
6005 6023 (mpt->m_phy_info[i].port_num == physport)) {
6006 6024 phy_mask = mpt->m_phy_info[i].phy_mask;
6007 6025 break;
6008 6026 }
6009 6027 }
6010 6028 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
6011 6029 mpt->m_instance, physport, phy_mask));
6012 6030 return (phy_mask);
6013 6031 }
6014 6032
6015 6033 /*
6016 6034 * mpt free device handle after device gone, by use of passthrough
6017 6035 */
6018 6036 static int
6019 6037 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
6020 6038 {
6021 6039 Mpi2SasIoUnitControlRequest_t req;
6022 6040 Mpi2SasIoUnitControlReply_t rep;
6023 6041 int ret;
6024 6042
6025 6043 ASSERT(mutex_owned(&mpt->m_mutex));
6026 6044
6027 6045 /*
6028 6046 * Need to compose a SAS IO Unit Control request message
6029 6047 * and call mptsas_do_passthru() function
6030 6048 */
6031 6049 bzero(&req, sizeof (req));
6032 6050 bzero(&rep, sizeof (rep));
6033 6051
6034 6052 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
6035 6053 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
6036 6054 req.DevHandle = LE_16(devhdl);
6037 6055
6038 6056 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
6039 6057 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
6040 6058 if (ret != 0) {
6041 6059 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6042 6060 "Control error %d", ret);
6043 6061 return (DDI_FAILURE);
6044 6062 }
6045 6063
6046 6064 /* do passthrough success, check the ioc status */
6047 6065 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
6048 6066 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6049 6067 "Control IOCStatus %d", LE_16(rep.IOCStatus));
6050 6068 return (DDI_FAILURE);
6051 6069 }
6052 6070
6053 6071 return (DDI_SUCCESS);
6054 6072 }
6055 6073
6056 6074 static void
6057 6075 mptsas_update_phymask(mptsas_t *mpt)
6058 6076 {
6059 6077 mptsas_phymask_t mask = 0, phy_mask;
6060 6078 char *phy_mask_name;
6061 6079 uint8_t current_port;
6062 6080 int i, j;
6063 6081
6064 6082 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
6065 6083
6066 6084 ASSERT(mutex_owned(&mpt->m_mutex));
6067 6085
6068 6086 (void) mptsas_get_sas_io_unit_page(mpt);
6069 6087
6070 6088 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6071 6089
6072 6090 for (i = 0; i < mpt->m_num_phys; i++) {
6073 6091 phy_mask = 0x00;
6074 6092
6075 6093 if (mpt->m_phy_info[i].attached_devhdl == 0)
6076 6094 continue;
6077 6095
6078 6096 bzero(phy_mask_name, sizeof (phy_mask_name));
6079 6097
6080 6098 current_port = mpt->m_phy_info[i].port_num;
6081 6099
6082 6100 if ((mask & (1 << i)) != 0)
6083 6101 continue;
6084 6102
6085 6103 for (j = 0; j < mpt->m_num_phys; j++) {
6086 6104 if (mpt->m_phy_info[j].attached_devhdl &&
6087 6105 (mpt->m_phy_info[j].port_num == current_port)) {
6088 6106 phy_mask |= (1 << j);
6089 6107 }
6090 6108 }
6091 6109 mask = mask | phy_mask;
6092 6110
6093 6111 for (j = 0; j < mpt->m_num_phys; j++) {
6094 6112 if ((phy_mask >> j) & 0x01) {
6095 6113 mpt->m_phy_info[j].phy_mask = phy_mask;
6096 6114 }
6097 6115 }
6098 6116
6099 6117 (void) sprintf(phy_mask_name, "%x", phy_mask);
6100 6118
6101 6119 mutex_exit(&mpt->m_mutex);
6102 6120 /*
6103 6121 * register a iport, if the port has already been existed
6104 6122 * SCSA will do nothing and just return.
6105 6123 */
6106 6124 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
6107 6125 mutex_enter(&mpt->m_mutex);
6108 6126 }
6109 6127 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6110 6128 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
6111 6129 }
6112 6130
6113 6131 /*
6114 6132 * mptsas_handle_dr is a task handler for DR, the DR action includes:
6115 6133 * 1. Directly attched Device Added/Removed.
6116 6134 * 2. Expander Device Added/Removed.
6117 6135 * 3. Indirectly Attached Device Added/Expander.
6118 6136 * 4. LUNs of a existing device status change.
6119 6137 * 5. RAID volume created/deleted.
6120 6138 * 6. Member of RAID volume is released because of RAID deletion.
6121 6139 * 7. Physical disks are removed because of RAID creation.
6122 6140 */
6123 6141 static void
6124 6142 mptsas_handle_dr(void *args) {
6125 6143 mptsas_topo_change_list_t *topo_node = NULL;
6126 6144 mptsas_topo_change_list_t *save_node = NULL;
6127 6145 mptsas_t *mpt;
6128 6146 dev_info_t *parent = NULL;
6129 6147 mptsas_phymask_t phymask = 0;
6130 6148 char *phy_mask_name;
6131 6149 uint8_t flags = 0, physport = 0xff;
6132 6150 uint8_t port_update = 0;
6133 6151 uint_t event;
6134 6152
6135 6153 topo_node = (mptsas_topo_change_list_t *)args;
6136 6154
6137 6155 mpt = topo_node->mpt;
6138 6156 event = topo_node->event;
6139 6157 flags = topo_node->flags;
6140 6158
6141 6159 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6142 6160
6143 6161 NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));
6144 6162
6145 6163 switch (event) {
6146 6164 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6147 6165 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6148 6166 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
6149 6167 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6150 6168 /*
6151 6169 * Direct attached or expander attached device added
6152 6170 * into system or a Phys Disk that is being unhidden.
6153 6171 */
6154 6172 port_update = 1;
6155 6173 }
6156 6174 break;
6157 6175 case MPTSAS_DR_EVENT_RECONFIG_SMP:
6158 6176 /*
6159 6177 * New expander added into system, it must be the head
6160 6178 * of topo_change_list_t
6161 6179 */
6162 6180 port_update = 1;
6163 6181 break;
6164 6182 default:
6165 6183 port_update = 0;
6166 6184 break;
6167 6185 }
6168 6186 /*
6169 6187 * All cases port_update == 1 may cause initiator port form change
6170 6188 */
6171 6189 mutex_enter(&mpt->m_mutex);
6172 6190 if (mpt->m_port_chng && port_update) {
6173 6191 /*
6174 6192 * mpt->m_port_chng flag indicates some PHYs of initiator
6175 6193 * port have changed to online. So when expander added or
6176 6194 * directly attached device online event come, we force to
6177 6195 * update port information by issueing SAS IO Unit Page and
6178 6196 * update PHYMASKs.
6179 6197 */
6180 6198 (void) mptsas_update_phymask(mpt);
6181 6199 mpt->m_port_chng = 0;
6182 6200
6183 6201 }
6184 6202 mutex_exit(&mpt->m_mutex);
6185 6203 while (topo_node) {
6186 6204 phymask = 0;
6187 6205 if (parent == NULL) {
6188 6206 physport = topo_node->un.physport;
6189 6207 event = topo_node->event;
6190 6208 flags = topo_node->flags;
6191 6209 if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
6192 6210 MPTSAS_DR_EVENT_OFFLINE_SMP)) {
6193 6211 /*
6194 6212 * For all offline events, phymask is known
6195 6213 */
6196 6214 phymask = topo_node->un.phymask;
6197 6215 goto find_parent;
6198 6216 }
6199 6217 if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6200 6218 goto handle_topo_change;
6201 6219 }
6202 6220 if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
6203 6221 phymask = topo_node->un.phymask;
6204 6222 goto find_parent;
6205 6223 }
6206 6224
6207 6225 if ((flags ==
6208 6226 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
6209 6227 (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
6210 6228 /*
6211 6229 * There is no any field in IR_CONFIG_CHANGE
6212 6230 * event indicate physport/phynum, let's get
6213 6231 * parent after SAS Device Page0 request.
6214 6232 */
6215 6233 goto handle_topo_change;
6216 6234 }
6217 6235
6218 6236 mutex_enter(&mpt->m_mutex);
6219 6237 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6220 6238 /*
6221 6239 * If the direct attached device added or a
6222 6240 * phys disk is being unhidden, argument
6223 6241 * physport actually is PHY#, so we have to get
6224 6242 * phymask according PHY#.
6225 6243 */
6226 6244 physport = mpt->m_phy_info[physport].port_num;
6227 6245 }
6228 6246
6229 6247 /*
6230 6248 * Translate physport to phymask so that we can search
6231 6249 * parent dip.
6232 6250 */
6233 6251 phymask = mptsas_physport_to_phymask(mpt,
6234 6252 physport);
6235 6253 mutex_exit(&mpt->m_mutex);
6236 6254
6237 6255 find_parent:
6238 6256 bzero(phy_mask_name, MPTSAS_MAX_PHYS);
6239 6257 /*
6240 6258 * For RAID topology change node, write the iport name
6241 6259 * as v0.
6242 6260 */
6243 6261 if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6244 6262 (void) sprintf(phy_mask_name, "v0");
6245 6263 } else {
6246 6264 /*
6247 6265 * phymask can bo 0 if the drive has been
6248 6266 * pulled by the time an add event is
6249 6267 * processed. If phymask is 0, just skip this
6250 6268 * event and continue.
6251 6269 */
6252 6270 if (phymask == 0) {
6253 6271 mutex_enter(&mpt->m_mutex);
6254 6272 save_node = topo_node;
6255 6273 topo_node = topo_node->next;
6256 6274 ASSERT(save_node);
6257 6275 kmem_free(save_node,
6258 6276 sizeof (mptsas_topo_change_list_t));
6259 6277 mutex_exit(&mpt->m_mutex);
6260 6278
6261 6279 parent = NULL;
6262 6280 continue;
6263 6281 }
6264 6282 (void) sprintf(phy_mask_name, "%x", phymask);
6265 6283 }
6266 6284 parent = scsi_hba_iport_find(mpt->m_dip,
6267 6285 phy_mask_name);
6268 6286 if (parent == NULL) {
6269 6287 mptsas_log(mpt, CE_WARN, "Failed to find an "
6270 6288 "iport, should not happen!");
6271 6289 goto out;
6272 6290 }
6273 6291
6274 6292 }
6275 6293 ASSERT(parent);
6276 6294 handle_topo_change:
6277 6295
6278 6296 mutex_enter(&mpt->m_mutex);
6279 6297 /*
6280 6298 * If HBA is being reset, don't perform operations depending
6281 6299 * on the IOC. We must free the topo list, however.
6282 6300 */
6283 6301 if (!mpt->m_in_reset)
6284 6302 mptsas_handle_topo_change(topo_node, parent);
6285 6303 else
6286 6304 NDBG20(("skipping topo change received during reset"));
6287 6305 save_node = topo_node;
6288 6306 topo_node = topo_node->next;
6289 6307 ASSERT(save_node);
6290 6308 kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
6291 6309 mutex_exit(&mpt->m_mutex);
6292 6310
6293 6311 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6294 6312 (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
6295 6313 (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
6296 6314 /*
6297 6315 * If direct attached device associated, make sure
6298 6316 * reset the parent before start the next one. But
6299 6317 * all devices associated with expander shares the
6300 6318 * parent. Also, reset parent if this is for RAID.
6301 6319 */
6302 6320 parent = NULL;
6303 6321 }
6304 6322 }
6305 6323 out:
6306 6324 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6307 6325 }
6308 6326
6309 6327 static void
6310 6328 mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
6311 6329 dev_info_t *parent)
6312 6330 {
6313 6331 mptsas_target_t *ptgt = NULL;
6314 6332 mptsas_smp_t *psmp = NULL;
6315 6333 mptsas_t *mpt = (void *)topo_node->mpt;
6316 6334 uint16_t devhdl;
6317 6335 uint16_t attached_devhdl;
6318 6336 uint64_t sas_wwn = 0;
6319 6337 int rval = 0;
6320 6338 uint32_t page_address;
6321 6339 uint8_t phy, flags;
6322 6340 char *addr = NULL;
6323 6341 dev_info_t *lundip;
6324 6342 int circ = 0, circ1 = 0;
6325 6343 char attached_wwnstr[MPTSAS_WWN_STRLEN];
6326 6344
6327 6345 NDBG20(("mptsas%d handle_topo_change enter, devhdl 0x%x,"
6328 6346 "event 0x%x, flags 0x%x", mpt->m_instance, topo_node->devhdl,
6329 6347 topo_node->event, topo_node->flags));
6330 6348
6331 6349 ASSERT(mutex_owned(&mpt->m_mutex));
6332 6350
6333 6351 switch (topo_node->event) {
6334 6352 case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6335 6353 {
6336 6354 char *phy_mask_name;
6337 6355 mptsas_phymask_t phymask = 0;
6338 6356
6339 6357 if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6340 6358 /*
6341 6359 * Get latest RAID info.
6342 6360 */
6343 6361 (void) mptsas_get_raid_info(mpt);
6344 6362 ptgt = refhash_linear_search(mpt->m_targets,
6345 6363 mptsas_target_eval_devhdl, &topo_node->devhdl);
6346 6364 if (ptgt == NULL)
6347 6365 break;
6348 6366 } else {
6349 6367 ptgt = (void *)topo_node->object;
6350 6368 }
6351 6369
6352 6370 if (ptgt == NULL) {
6353 6371 /*
6354 6372 * If a Phys Disk was deleted, RAID info needs to be
6355 6373 * updated to reflect the new topology.
6356 6374 */
6357 6375 (void) mptsas_get_raid_info(mpt);
6358 6376
6359 6377 /*
6360 6378 * Get sas device page 0 by DevHandle to make sure if
6361 6379 * SSP/SATA end device exist.
6362 6380 */
6363 6381 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6364 6382 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
6365 6383 topo_node->devhdl;
6366 6384
6367 6385 rval = mptsas_get_target_device_info(mpt, page_address,
6368 6386 &devhdl, &ptgt);
6369 6387 if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
6370 6388 mptsas_log(mpt, CE_NOTE,
6371 6389 "mptsas_handle_topo_change: target %d is "
6372 6390 "not a SAS/SATA device. \n",
6373 6391 topo_node->devhdl);
6374 6392 } else if (rval == DEV_INFO_FAIL_ALLOC) {
6375 6393 mptsas_log(mpt, CE_NOTE,
6376 6394 "mptsas_handle_topo_change: could not "
6377 6395 "allocate memory. \n");
6378 6396 }
6379 6397 /*
6380 6398 * If rval is DEV_INFO_PHYS_DISK than there is nothing
6381 6399 * else to do, just leave.
6382 6400 */
6383 6401 if (rval != DEV_INFO_SUCCESS) {
6384 6402 return;
6385 6403 }
6386 6404 }
6387 6405
6388 6406 ASSERT(ptgt->m_devhdl == topo_node->devhdl);
6389 6407
6390 6408 mutex_exit(&mpt->m_mutex);
6391 6409 flags = topo_node->flags;
6392 6410
6393 6411 if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
6394 6412 phymask = ptgt->m_addr.mta_phymask;
6395 6413 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6396 6414 (void) sprintf(phy_mask_name, "%x", phymask);
6397 6415 parent = scsi_hba_iport_find(mpt->m_dip,
6398 6416 phy_mask_name);
6399 6417 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6400 6418 if (parent == NULL) {
6401 6419 mptsas_log(mpt, CE_WARN, "Failed to find a "
6402 6420 "iport for PD, should not happen!");
6403 6421 mutex_enter(&mpt->m_mutex);
6404 6422 break;
6405 6423 }
6406 6424 }
6407 6425
6408 6426 if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6409 6427 ndi_devi_enter(parent, &circ1);
6410 6428 (void) mptsas_config_raid(parent, topo_node->devhdl,
6411 6429 &lundip);
6412 6430 ndi_devi_exit(parent, circ1);
6413 6431 } else {
6414 6432 /*
6415 6433 * hold nexus for bus configure
6416 6434 */
6417 6435 ndi_devi_enter(scsi_vhci_dip, &circ);
6418 6436 ndi_devi_enter(parent, &circ1);
6419 6437 rval = mptsas_config_target(parent, ptgt);
6420 6438 /*
6421 6439 * release nexus for bus configure
6422 6440 */
6423 6441 ndi_devi_exit(parent, circ1);
6424 6442 ndi_devi_exit(scsi_vhci_dip, circ);
6425 6443
6426 6444 /*
6427 6445 * Add parent's props for SMHBA support
6428 6446 */
6429 6447 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6430 6448 bzero(attached_wwnstr,
6431 6449 sizeof (attached_wwnstr));
6432 6450 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
6433 6451 ptgt->m_addr.mta_wwn);
6434 6452 if (ddi_prop_update_string(DDI_DEV_T_NONE,
6435 6453 parent,
6436 6454 SCSI_ADDR_PROP_ATTACHED_PORT,
6437 6455 attached_wwnstr)
6438 6456 != DDI_PROP_SUCCESS) {
6439 6457 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6440 6458 parent,
6441 6459 SCSI_ADDR_PROP_ATTACHED_PORT);
6442 6460 mptsas_log(mpt, CE_WARN, "Failed to"
6443 6461 "attached-port props");
6444 6462 return;
6445 6463 }
6446 6464 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6447 6465 MPTSAS_NUM_PHYS, 1) !=
6448 6466 DDI_PROP_SUCCESS) {
6449 6467 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6450 6468 parent, MPTSAS_NUM_PHYS);
6451 6469 mptsas_log(mpt, CE_WARN, "Failed to"
6452 6470 " create num-phys props");
6453 6471 return;
6454 6472 }
6455 6473
6456 6474 /*
6457 6475 * Update PHY info for smhba
6458 6476 */
6459 6477 mutex_enter(&mpt->m_mutex);
6460 6478 if (mptsas_smhba_phy_init(mpt)) {
6461 6479 mutex_exit(&mpt->m_mutex);
6462 6480 mptsas_log(mpt, CE_WARN, "mptsas phy"
6463 6481 " update failed");
6464 6482 return;
6465 6483 }
6466 6484 mutex_exit(&mpt->m_mutex);
6467 6485
6468 6486 /*
6469 6487 * topo_node->un.physport is really the PHY#
6470 6488 * for direct attached devices
6471 6489 */
6472 6490 mptsas_smhba_set_one_phy_props(mpt, parent,
6473 6491 topo_node->un.physport, &attached_devhdl);
6474 6492
6475 6493 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6476 6494 MPTSAS_VIRTUAL_PORT, 0) !=
6477 6495 DDI_PROP_SUCCESS) {
6478 6496 (void) ddi_prop_remove(DDI_DEV_T_NONE,
6479 6497 parent, MPTSAS_VIRTUAL_PORT);
6480 6498 mptsas_log(mpt, CE_WARN,
6481 6499 "mptsas virtual-port"
6482 6500 "port prop update failed");
6483 6501 return;
6484 6502 }
6485 6503 }
6486 6504 }
6487 6505 mutex_enter(&mpt->m_mutex);
6488 6506
6489 6507 NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
6490 6508 "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
6491 6509 ptgt->m_addr.mta_phymask));
6492 6510 break;
6493 6511 }
6494 6512 case MPTSAS_DR_EVENT_OFFLINE_TARGET:
6495 6513 {
6496 6514 devhdl = topo_node->devhdl;
6497 6515 ptgt = refhash_linear_search(mpt->m_targets,
6498 6516 mptsas_target_eval_devhdl, &devhdl);
6499 6517 if (ptgt == NULL)
6500 6518 break;
6501 6519
6502 6520 sas_wwn = ptgt->m_addr.mta_wwn;
6503 6521 phy = ptgt->m_phynum;
6504 6522
6505 6523 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
6506 6524
6507 6525 if (sas_wwn) {
6508 6526 (void) sprintf(addr, "w%016"PRIx64, sas_wwn);
6509 6527 } else {
6510 6528 (void) sprintf(addr, "p%x", phy);
6511 6529 }
6512 6530 ASSERT(ptgt->m_devhdl == devhdl);
6513 6531
6514 6532 if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
6515 6533 (topo_node->flags ==
6516 6534 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6517 6535 /*
6518 6536 * Get latest RAID info if RAID volume status changes
6519 6537 * or Phys Disk status changes
6520 6538 */
6521 6539 (void) mptsas_get_raid_info(mpt);
6522 6540 }
6523 6541 /*
6524 6542 * Abort all outstanding command on the device
6525 6543 */
6526 6544 rval = mptsas_do_scsi_reset(mpt, devhdl);
6527 6545 if (rval) {
6528 6546 NDBG20(("mptsas%d handle_topo_change to reset target "
6529 6547 "before offline devhdl:%x, phymask:%x, rval:%x",
6530 6548 mpt->m_instance, ptgt->m_devhdl,
6531 6549 ptgt->m_addr.mta_phymask, rval));
6532 6550 }
6533 6551
6534 6552 mutex_exit(&mpt->m_mutex);
6535 6553
6536 6554 ndi_devi_enter(scsi_vhci_dip, &circ);
6537 6555 ndi_devi_enter(parent, &circ1);
6538 6556 rval = mptsas_offline_target(parent, addr);
6539 6557 ndi_devi_exit(parent, circ1);
6540 6558 ndi_devi_exit(scsi_vhci_dip, circ);
6541 6559 NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
6542 6560 "phymask:%x, rval:%x", mpt->m_instance,
6543 6561 ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));
6544 6562
6545 6563 kmem_free(addr, SCSI_MAXNAMELEN);
6546 6564
6547 6565 /*
6548 6566 * Clear parent's props for SMHBA support
6549 6567 */
6550 6568 flags = topo_node->flags;
6551 6569 if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6552 6570 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6553 6571 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6554 6572 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6555 6573 DDI_PROP_SUCCESS) {
6556 6574 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6557 6575 SCSI_ADDR_PROP_ATTACHED_PORT);
6558 6576 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6559 6577 "prop update failed");
6560 6578 break;
6561 6579 }
6562 6580 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6563 6581 MPTSAS_NUM_PHYS, 0) !=
6564 6582 DDI_PROP_SUCCESS) {
6565 6583 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6566 6584 MPTSAS_NUM_PHYS);
6567 6585 mptsas_log(mpt, CE_WARN, "mptsas num phys "
6568 6586 "prop update failed");
6569 6587 break;
6570 6588 }
6571 6589 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6572 6590 MPTSAS_VIRTUAL_PORT, 1) !=
6573 6591 DDI_PROP_SUCCESS) {
6574 6592 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6575 6593 MPTSAS_VIRTUAL_PORT);
6576 6594 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6577 6595 "prop update failed");
6578 6596 break;
6579 6597 }
6580 6598 }
6581 6599
6582 6600 mutex_enter(&mpt->m_mutex);
6583 6601 ptgt->m_led_status = 0;
6584 6602 (void) mptsas_flush_led_status(mpt, ptgt);
6585 6603 if (rval == DDI_SUCCESS) {
6586 6604 refhash_remove(mpt->m_targets, ptgt);
6587 6605 ptgt = NULL;
6588 6606 } else {
6589 6607 /*
6590 6608 * clean DR_INTRANSITION flag to allow I/O down to
6591 6609 * PHCI driver since failover finished.
6592 6610 * Invalidate the devhdl
6593 6611 */
6594 6612 ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6595 6613 ptgt->m_tgt_unconfigured = 0;
6596 6614 mutex_enter(&mpt->m_tx_waitq_mutex);
6597 6615 ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6598 6616 mutex_exit(&mpt->m_tx_waitq_mutex);
6599 6617 }
6600 6618
6601 6619 /*
6602 6620 * Send SAS IO Unit Control to free the dev handle
6603 6621 */
6604 6622 if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6605 6623 (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
6606 6624 rval = mptsas_free_devhdl(mpt, devhdl);
6607 6625
6608 6626 NDBG20(("mptsas%d handle_topo_change to remove "
6609 6627 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6610 6628 rval));
6611 6629 }
6612 6630
6613 6631 break;
6614 6632 }
6615 6633 case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
6616 6634 {
6617 6635 devhdl = topo_node->devhdl;
6618 6636 /*
6619 6637 * If this is the remove handle event, do a reset first.
6620 6638 */
6621 6639 if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6622 6640 rval = mptsas_do_scsi_reset(mpt, devhdl);
6623 6641 if (rval) {
6624 6642 NDBG20(("mpt%d reset target before remove "
6625 6643 "devhdl:%x, rval:%x", mpt->m_instance,
6626 6644 devhdl, rval));
6627 6645 }
6628 6646 }
6629 6647
6630 6648 /*
6631 6649 * Send SAS IO Unit Control to free the dev handle
6632 6650 */
6633 6651 rval = mptsas_free_devhdl(mpt, devhdl);
6634 6652 NDBG20(("mptsas%d handle_topo_change to remove "
6635 6653 "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6636 6654 rval));
6637 6655 break;
6638 6656 }
6639 6657 case MPTSAS_DR_EVENT_RECONFIG_SMP:
6640 6658 {
6641 6659 mptsas_smp_t smp;
6642 6660 dev_info_t *smpdip;
6643 6661
6644 6662 devhdl = topo_node->devhdl;
6645 6663
6646 6664 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
6647 6665 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
6648 6666 rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
6649 6667 if (rval != DDI_SUCCESS) {
6650 6668 mptsas_log(mpt, CE_WARN, "failed to online smp, "
6651 6669 "handle %x", devhdl);
6652 6670 return;
6653 6671 }
6654 6672
6655 6673 psmp = mptsas_smp_alloc(mpt, &smp);
6656 6674 if (psmp == NULL) {
6657 6675 return;
6658 6676 }
6659 6677
6660 6678 mutex_exit(&mpt->m_mutex);
6661 6679 ndi_devi_enter(parent, &circ1);
6662 6680 (void) mptsas_online_smp(parent, psmp, &smpdip);
6663 6681 ndi_devi_exit(parent, circ1);
6664 6682
6665 6683 mutex_enter(&mpt->m_mutex);
6666 6684 break;
6667 6685 }
6668 6686 case MPTSAS_DR_EVENT_OFFLINE_SMP:
6669 6687 {
6670 6688 devhdl = topo_node->devhdl;
6671 6689 uint32_t dev_info;
6672 6690
6673 6691 psmp = refhash_linear_search(mpt->m_smp_targets,
6674 6692 mptsas_smp_eval_devhdl, &devhdl);
6675 6693 if (psmp == NULL)
6676 6694 break;
6677 6695 /*
6678 6696 * The mptsas_smp_t data is released only if the dip is offlined
6679 6697 * successfully.
6680 6698 */
6681 6699 mutex_exit(&mpt->m_mutex);
6682 6700
6683 6701 ndi_devi_enter(parent, &circ1);
6684 6702 rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
6685 6703 ndi_devi_exit(parent, circ1);
6686 6704
6687 6705 dev_info = psmp->m_deviceinfo;
6688 6706 if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
6689 6707 DEVINFO_DIRECT_ATTACHED) {
6690 6708 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6691 6709 MPTSAS_VIRTUAL_PORT, 1) !=
6692 6710 DDI_PROP_SUCCESS) {
6693 6711 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6694 6712 MPTSAS_VIRTUAL_PORT);
6695 6713 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6696 6714 "prop update failed");
6697 6715 return;
6698 6716 }
6699 6717 /*
6700 6718 * Check whether the smp connected to the iport,
6701 6719 */
6702 6720 if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6703 6721 MPTSAS_NUM_PHYS, 0) !=
6704 6722 DDI_PROP_SUCCESS) {
6705 6723 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6706 6724 MPTSAS_NUM_PHYS);
6707 6725 mptsas_log(mpt, CE_WARN, "mptsas num phys"
6708 6726 "prop update failed");
6709 6727 return;
6710 6728 }
6711 6729 /*
6712 6730 * Clear parent's attached-port props
6713 6731 */
6714 6732 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6715 6733 if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6716 6734 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6717 6735 DDI_PROP_SUCCESS) {
6718 6736 (void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6719 6737 SCSI_ADDR_PROP_ATTACHED_PORT);
6720 6738 mptsas_log(mpt, CE_WARN, "mptsas attached port "
6721 6739 "prop update failed");
6722 6740 return;
6723 6741 }
6724 6742 }
6725 6743
6726 6744 mutex_enter(&mpt->m_mutex);
6727 6745 NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
6728 6746 "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
6729 6747 if (rval == DDI_SUCCESS) {
6730 6748 refhash_remove(mpt->m_smp_targets, psmp);
6731 6749 } else {
6732 6750 psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
6733 6751 }
6734 6752
6735 6753 bzero(attached_wwnstr, sizeof (attached_wwnstr));
6736 6754
6737 6755 break;
6738 6756 }
6739 6757 default:
6740 6758 return;
6741 6759 }
6742 6760 }
6743 6761
6744 6762 /*
6745 6763 * Record the event if its type is enabled in mpt instance by ioctl.
6746 6764 */
6747 6765 static void
6748 6766 mptsas_record_event(void *args)
6749 6767 {
6750 6768 m_replyh_arg_t *replyh_arg;
6751 6769 pMpi2EventNotificationReply_t eventreply;
6752 6770 uint32_t event, rfm;
6753 6771 mptsas_t *mpt;
6754 6772 int i, j;
6755 6773 uint16_t event_data_len;
6756 6774 boolean_t sendAEN = FALSE;
6757 6775
6758 6776 replyh_arg = (m_replyh_arg_t *)args;
6759 6777 rfm = replyh_arg->rfm;
6760 6778 mpt = replyh_arg->mpt;
6761 6779
6762 6780 eventreply = (pMpi2EventNotificationReply_t)
6763 6781 (mpt->m_reply_frame + (rfm -
6764 6782 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
6765 6783 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6766 6784
6767 6785
6768 6786 /*
6769 6787 * Generate a system event to let anyone who cares know that a
6770 6788 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6771 6789 * event mask is set to.
6772 6790 */
6773 6791 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6774 6792 sendAEN = TRUE;
6775 6793 }
6776 6794
6777 6795 /*
6778 6796 * Record the event only if it is not masked. Determine which dword
6779 6797 * and bit of event mask to test.
6780 6798 */
6781 6799 i = (uint8_t)(event / 32);
6782 6800 j = (uint8_t)(event % 32);
6783 6801 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6784 6802 i = mpt->m_event_index;
6785 6803 mpt->m_events[i].Type = event;
6786 6804 mpt->m_events[i].Number = ++mpt->m_event_number;
6787 6805 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6788 6806 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6789 6807 &eventreply->EventDataLength);
6790 6808
6791 6809 if (event_data_len > 0) {
6792 6810 /*
6793 6811 * Limit data to size in m_event entry
6794 6812 */
6795 6813 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6796 6814 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6797 6815 }
6798 6816 for (j = 0; j < event_data_len; j++) {
6799 6817 mpt->m_events[i].Data[j] =
6800 6818 ddi_get32(mpt->m_acc_reply_frame_hdl,
6801 6819 &(eventreply->EventData[j]));
6802 6820 }
6803 6821
6804 6822 /*
6805 6823 * check for index wrap-around
6806 6824 */
6807 6825 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6808 6826 i = 0;
6809 6827 }
6810 6828 mpt->m_event_index = (uint8_t)i;
6811 6829
6812 6830 /*
6813 6831 * Set flag to send the event.
6814 6832 */
6815 6833 sendAEN = TRUE;
6816 6834 }
6817 6835 }
6818 6836
6819 6837 /*
6820 6838 * Generate a system event if flag is set to let anyone who cares know
6821 6839 * that an event has occurred.
6822 6840 */
6823 6841 if (sendAEN) {
6824 6842 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6825 6843 "SAS", NULL, NULL, DDI_NOSLEEP);
6826 6844 }
6827 6845 }
6828 6846
6829 6847 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6830 6848 /*
6831 6849 * handle sync events from ioc in interrupt
6832 6850 * return value:
6833 6851 * DDI_SUCCESS: The event is handled by this func
6834 6852 * DDI_FAILURE: Event is not handled
6835 6853 */
6836 6854 static int
6837 6855 mptsas_handle_event_sync(void *args)
6838 6856 {
6839 6857 m_replyh_arg_t *replyh_arg;
6840 6858 pMpi2EventNotificationReply_t eventreply;
6841 6859 uint32_t event, rfm;
6842 6860 mptsas_t *mpt;
6843 6861 uint_t iocstatus;
6844 6862
6845 6863 replyh_arg = (m_replyh_arg_t *)args;
6846 6864 rfm = replyh_arg->rfm;
6847 6865 mpt = replyh_arg->mpt;
6848 6866
6849 6867 ASSERT(mutex_owned(&mpt->m_mutex));
6850 6868
6851 6869 eventreply = (pMpi2EventNotificationReply_t)
6852 6870 (mpt->m_reply_frame + (rfm -
6853 6871 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
6854 6872 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6855 6873
6856 6874 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6857 6875 &eventreply->IOCStatus)) {
6858 6876 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6859 6877 mptsas_log(mpt, CE_WARN,
6860 6878 "!mptsas_handle_event_sync: event 0x%x, "
6861 6879 "IOCStatus=0x%x, "
6862 6880 "IOCLogInfo=0x%x", event, iocstatus,
6863 6881 ddi_get32(mpt->m_acc_reply_frame_hdl,
6864 6882 &eventreply->IOCLogInfo));
6865 6883 } else {
6866 6884 mptsas_log(mpt, CE_WARN,
6867 6885 "mptsas_handle_event_sync: event 0x%x, "
6868 6886 "IOCStatus=0x%x, "
6869 6887 "(IOCLogInfo=0x%x)", event, iocstatus,
6870 6888 ddi_get32(mpt->m_acc_reply_frame_hdl,
6871 6889 &eventreply->IOCLogInfo));
6872 6890 }
6873 6891 }
6874 6892
6875 6893 /*
6876 6894 * figure out what kind of event we got and handle accordingly
6877 6895 */
6878 6896 switch (event) {
6879 6897 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6880 6898 {
6881 6899 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
6882 6900 uint8_t num_entries, expstatus, phy;
6883 6901 uint8_t phystatus, physport, state, i;
6884 6902 uint8_t start_phy_num, link_rate;
6885 6903 uint16_t dev_handle, reason_code;
6886 6904 uint16_t enc_handle, expd_handle;
6887 6905 char string[80], curr[80], prev[80];
6888 6906 mptsas_topo_change_list_t *topo_head = NULL;
6889 6907 mptsas_topo_change_list_t *topo_tail = NULL;
6890 6908 mptsas_topo_change_list_t *topo_node = NULL;
6891 6909 mptsas_target_t *ptgt;
6892 6910 mptsas_smp_t *psmp;
6893 6911 uint8_t flags = 0, exp_flag;
6894 6912 smhba_info_t *pSmhba = NULL;
6895 6913
6896 6914 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6897 6915
6898 6916 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6899 6917 eventreply->EventData;
6900 6918
6901 6919 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6902 6920 &sas_topo_change_list->EnclosureHandle);
6903 6921 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6904 6922 &sas_topo_change_list->ExpanderDevHandle);
6905 6923 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6906 6924 &sas_topo_change_list->NumEntries);
6907 6925 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6908 6926 &sas_topo_change_list->StartPhyNum);
6909 6927 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6910 6928 &sas_topo_change_list->ExpStatus);
6911 6929 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6912 6930 &sas_topo_change_list->PhysicalPort);
6913 6931
6914 6932 string[0] = 0;
6915 6933 if (expd_handle) {
6916 6934 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6917 6935 switch (expstatus) {
6918 6936 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6919 6937 (void) sprintf(string, " added");
6920 6938 /*
6921 6939 * New expander device added
6922 6940 */
6923 6941 mpt->m_port_chng = 1;
6924 6942 topo_node = kmem_zalloc(
6925 6943 sizeof (mptsas_topo_change_list_t),
6926 6944 KM_SLEEP);
6927 6945 topo_node->mpt = mpt;
6928 6946 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6929 6947 topo_node->un.physport = physport;
6930 6948 topo_node->devhdl = expd_handle;
6931 6949 topo_node->flags = flags;
6932 6950 topo_node->object = NULL;
6933 6951 if (topo_head == NULL) {
6934 6952 topo_head = topo_tail = topo_node;
6935 6953 } else {
6936 6954 topo_tail->next = topo_node;
6937 6955 topo_tail = topo_node;
6938 6956 }
6939 6957 break;
6940 6958 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6941 6959 (void) sprintf(string, " not responding, "
6942 6960 "removed");
6943 6961 psmp = refhash_linear_search(mpt->m_smp_targets,
6944 6962 mptsas_smp_eval_devhdl, &expd_handle);
6945 6963 if (psmp == NULL)
6946 6964 break;
6947 6965
6948 6966 topo_node = kmem_zalloc(
6949 6967 sizeof (mptsas_topo_change_list_t),
6950 6968 KM_SLEEP);
6951 6969 topo_node->mpt = mpt;
6952 6970 topo_node->un.phymask =
6953 6971 psmp->m_addr.mta_phymask;
6954 6972 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6955 6973 topo_node->devhdl = expd_handle;
6956 6974 topo_node->flags = flags;
6957 6975 topo_node->object = NULL;
6958 6976 if (topo_head == NULL) {
6959 6977 topo_head = topo_tail = topo_node;
6960 6978 } else {
6961 6979 topo_tail->next = topo_node;
6962 6980 topo_tail = topo_node;
6963 6981 }
6964 6982 break;
6965 6983 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6966 6984 break;
6967 6985 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6968 6986 (void) sprintf(string, " not responding, "
6969 6987 "delaying removal");
6970 6988 break;
6971 6989 default:
6972 6990 break;
6973 6991 }
6974 6992 } else {
6975 6993 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6976 6994 }
6977 6995
6978 6996 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6979 6997 enc_handle, expd_handle, string));
6980 6998 for (i = 0; i < num_entries; i++) {
6981 6999 phy = i + start_phy_num;
6982 7000 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6983 7001 &sas_topo_change_list->PHY[i].PhyStatus);
6984 7002 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6985 7003 &sas_topo_change_list->PHY[i].AttachedDevHandle);
6986 7004 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
6987 7005 /*
6988 7006 * Filter out processing of Phy Vacant Status unless
6989 7007 * the reason code is "Not Responding". Process all
6990 7008 * other combinations of Phy Status and Reason Codes.
6991 7009 */
6992 7010 if ((phystatus &
6993 7011 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
6994 7012 (reason_code !=
6995 7013 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
6996 7014 continue;
6997 7015 }
6998 7016 curr[0] = 0;
6999 7017 prev[0] = 0;
7000 7018 string[0] = 0;
7001 7019 switch (reason_code) {
7002 7020 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7003 7021 {
7004 7022 NDBG20(("mptsas%d phy %d physical_port %d "
7005 7023 "dev_handle %d added", mpt->m_instance, phy,
7006 7024 physport, dev_handle));
7007 7025 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7008 7026 &sas_topo_change_list->PHY[i].LinkRate);
7009 7027 state = (link_rate &
7010 7028 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7011 7029 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7012 7030 switch (state) {
7013 7031 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7014 7032 (void) sprintf(curr, "is disabled");
7015 7033 break;
7016 7034 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7017 7035 (void) sprintf(curr, "is offline, "
7018 7036 "failed speed negotiation");
7019 7037 break;
7020 7038 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7021 7039 (void) sprintf(curr, "SATA OOB "
7022 7040 "complete");
7023 7041 break;
7024 7042 case SMP_RESET_IN_PROGRESS:
7025 7043 (void) sprintf(curr, "SMP reset in "
7026 7044 "progress");
7027 7045 break;
7028 7046 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7029 7047 (void) sprintf(curr, "is online at "
7030 7048 "1.5 Gbps");
7031 7049 break;
7032 7050 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7033 7051 (void) sprintf(curr, "is online at 3.0 "
7034 7052 "Gbps");
7035 7053 break;
7036 7054 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7037 7055 (void) sprintf(curr, "is online at 6.0 "
7038 7056 "Gbps");
7039 7057 break;
7040 7058 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7041 7059 (void) sprintf(curr,
7042 7060 "is online at 12.0 Gbps");
7043 7061 break;
7044 7062 default:
7045 7063 (void) sprintf(curr, "state is "
7046 7064 "unknown");
7047 7065 break;
7048 7066 }
7049 7067 /*
7050 7068 * New target device added into the system.
7051 7069 * Set association flag according to if an
7052 7070 * expander is used or not.
7053 7071 */
7054 7072 exp_flag =
7055 7073 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7056 7074 if (flags ==
7057 7075 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7058 7076 flags = exp_flag;
7059 7077 }
7060 7078 topo_node = kmem_zalloc(
7061 7079 sizeof (mptsas_topo_change_list_t),
7062 7080 KM_SLEEP);
7063 7081 topo_node->mpt = mpt;
7064 7082 topo_node->event =
7065 7083 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7066 7084 if (expd_handle == 0) {
7067 7085 /*
7068 7086 * Per MPI 2, if expander dev handle
7069 7087 * is 0, it's a directly attached
7070 7088 * device. So driver use PHY to decide
7071 7089 * which iport is associated
7072 7090 */
7073 7091 physport = phy;
7074 7092 mpt->m_port_chng = 1;
7075 7093 }
7076 7094 topo_node->un.physport = physport;
7077 7095 topo_node->devhdl = dev_handle;
7078 7096 topo_node->flags = flags;
7079 7097 topo_node->object = NULL;
7080 7098 if (topo_head == NULL) {
7081 7099 topo_head = topo_tail = topo_node;
7082 7100 } else {
7083 7101 topo_tail->next = topo_node;
7084 7102 topo_tail = topo_node;
7085 7103 }
7086 7104 break;
7087 7105 }
7088 7106 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7089 7107 {
7090 7108 NDBG20(("mptsas%d phy %d physical_port %d "
7091 7109 "dev_handle %d removed", mpt->m_instance,
7092 7110 phy, physport, dev_handle));
7093 7111 /*
7094 7112 * Set association flag according to if an
7095 7113 * expander is used or not.
7096 7114 */
7097 7115 exp_flag =
7098 7116 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7099 7117 if (flags ==
7100 7118 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7101 7119 flags = exp_flag;
7102 7120 }
7103 7121 /*
7104 7122 * Target device is removed from the system
7105 7123 * Before the device is really offline from
7106 7124 * from system.
7107 7125 */
7108 7126 ptgt = refhash_linear_search(mpt->m_targets,
7109 7127 mptsas_target_eval_devhdl, &dev_handle);
7110 7128 /*
7111 7129 * If ptgt is NULL here, it means that the
7112 7130 * DevHandle is not in the hash table. This is
7113 7131 * reasonable sometimes. For example, if a
7114 7132 * disk was pulled, then added, then pulled
7115 7133 * again, the disk will not have been put into
7116 7134 * the hash table because the add event will
7117 7135 * have an invalid phymask. BUT, this does not
7118 7136 * mean that the DevHandle is invalid. The
7119 7137 * controller will still have a valid DevHandle
7120 7138 * that must be removed. To do this, use the
7121 7139 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
7122 7140 */
7123 7141 if (ptgt == NULL) {
7124 7142 topo_node = kmem_zalloc(
7125 7143 sizeof (mptsas_topo_change_list_t),
7126 7144 KM_SLEEP);
7127 7145 topo_node->mpt = mpt;
7128 7146 topo_node->un.phymask = 0;
7129 7147 topo_node->event =
7130 7148 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7131 7149 topo_node->devhdl = dev_handle;
7132 7150 topo_node->flags = flags;
7133 7151 topo_node->object = NULL;
7134 7152 if (topo_head == NULL) {
7135 7153 topo_head = topo_tail =
7136 7154 topo_node;
7137 7155 } else {
7138 7156 topo_tail->next = topo_node;
7139 7157 topo_tail = topo_node;
7140 7158 }
7141 7159 break;
7142 7160 }
7143 7161
7144 7162 /*
7145 7163 * Update DR flag immediately avoid I/O failure
7146 7164 * before failover finish. Pay attention to the
7147 7165 * mutex protect, we need grab m_tx_waitq_mutex
7148 7166 * during set m_dr_flag because we won't add
7149 7167 * the following command into waitq, instead,
7150 7168 * we need return TRAN_BUSY in the tran_start
7151 7169 * context.
7152 7170 */
7153 7171 mutex_enter(&mpt->m_tx_waitq_mutex);
7154 7172 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7155 7173 mutex_exit(&mpt->m_tx_waitq_mutex);
7156 7174
7157 7175 topo_node = kmem_zalloc(
7158 7176 sizeof (mptsas_topo_change_list_t),
7159 7177 KM_SLEEP);
7160 7178 topo_node->mpt = mpt;
7161 7179 topo_node->un.phymask =
7162 7180 ptgt->m_addr.mta_phymask;
7163 7181 topo_node->event =
7164 7182 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7165 7183 topo_node->devhdl = dev_handle;
7166 7184 topo_node->flags = flags;
7167 7185 topo_node->object = NULL;
7168 7186 if (topo_head == NULL) {
7169 7187 topo_head = topo_tail = topo_node;
7170 7188 } else {
7171 7189 topo_tail->next = topo_node;
7172 7190 topo_tail = topo_node;
7173 7191 }
7174 7192 break;
7175 7193 }
7176 7194 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7177 7195 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7178 7196 &sas_topo_change_list->PHY[i].LinkRate);
7179 7197 state = (link_rate &
7180 7198 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7181 7199 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7182 7200 pSmhba = &mpt->m_phy_info[i].smhba_info;
7183 7201 pSmhba->negotiated_link_rate = state;
7184 7202 switch (state) {
7185 7203 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7186 7204 (void) sprintf(curr, "is disabled");
7187 7205 mptsas_smhba_log_sysevent(mpt,
7188 7206 ESC_SAS_PHY_EVENT,
7189 7207 SAS_PHY_REMOVE,
7190 7208 &mpt->m_phy_info[i].smhba_info);
7191 7209 mpt->m_phy_info[i].smhba_info.
7192 7210 negotiated_link_rate
7193 7211 = 0x1;
7194 7212 break;
7195 7213 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7196 7214 (void) sprintf(curr, "is offline, "
7197 7215 "failed speed negotiation");
7198 7216 mptsas_smhba_log_sysevent(mpt,
7199 7217 ESC_SAS_PHY_EVENT,
7200 7218 SAS_PHY_OFFLINE,
7201 7219 &mpt->m_phy_info[i].smhba_info);
7202 7220 break;
7203 7221 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7204 7222 (void) sprintf(curr, "SATA OOB "
7205 7223 "complete");
7206 7224 break;
7207 7225 case SMP_RESET_IN_PROGRESS:
7208 7226 (void) sprintf(curr, "SMP reset in "
7209 7227 "progress");
7210 7228 break;
7211 7229 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7212 7230 (void) sprintf(curr, "is online at "
7213 7231 "1.5 Gbps");
7214 7232 if ((expd_handle == 0) &&
7215 7233 (enc_handle == 1)) {
7216 7234 mpt->m_port_chng = 1;
7217 7235 }
7218 7236 mptsas_smhba_log_sysevent(mpt,
7219 7237 ESC_SAS_PHY_EVENT,
7220 7238 SAS_PHY_ONLINE,
7221 7239 &mpt->m_phy_info[i].smhba_info);
7222 7240 break;
7223 7241 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7224 7242 (void) sprintf(curr, "is online at 3.0 "
7225 7243 "Gbps");
7226 7244 if ((expd_handle == 0) &&
7227 7245 (enc_handle == 1)) {
7228 7246 mpt->m_port_chng = 1;
7229 7247 }
7230 7248 mptsas_smhba_log_sysevent(mpt,
7231 7249 ESC_SAS_PHY_EVENT,
7232 7250 SAS_PHY_ONLINE,
7233 7251 &mpt->m_phy_info[i].smhba_info);
7234 7252 break;
7235 7253 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7236 7254 (void) sprintf(curr, "is online at "
7237 7255 "6.0 Gbps");
7238 7256 if ((expd_handle == 0) &&
7239 7257 (enc_handle == 1)) {
7240 7258 mpt->m_port_chng = 1;
7241 7259 }
7242 7260 mptsas_smhba_log_sysevent(mpt,
7243 7261 ESC_SAS_PHY_EVENT,
7244 7262 SAS_PHY_ONLINE,
7245 7263 &mpt->m_phy_info[i].smhba_info);
7246 7264 break;
7247 7265 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7248 7266 (void) sprintf(curr, "is online at "
7249 7267 "12.0 Gbps");
7250 7268 if ((expd_handle == 0) &&
7251 7269 (enc_handle == 1)) {
7252 7270 mpt->m_port_chng = 1;
7253 7271 }
7254 7272 mptsas_smhba_log_sysevent(mpt,
7255 7273 ESC_SAS_PHY_EVENT,
7256 7274 SAS_PHY_ONLINE,
7257 7275 &mpt->m_phy_info[i].smhba_info);
7258 7276 break;
7259 7277 default:
7260 7278 (void) sprintf(curr, "state is "
7261 7279 "unknown");
7262 7280 break;
7263 7281 }
7264 7282
7265 7283 state = (link_rate &
7266 7284 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
7267 7285 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
7268 7286 switch (state) {
7269 7287 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7270 7288 (void) sprintf(prev, ", was disabled");
7271 7289 break;
7272 7290 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7273 7291 (void) sprintf(prev, ", was offline, "
7274 7292 "failed speed negotiation");
7275 7293 break;
7276 7294 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7277 7295 (void) sprintf(prev, ", was SATA OOB "
7278 7296 "complete");
7279 7297 break;
7280 7298 case SMP_RESET_IN_PROGRESS:
7281 7299 (void) sprintf(prev, ", was SMP reset "
7282 7300 "in progress");
7283 7301 break;
7284 7302 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7285 7303 (void) sprintf(prev, ", was online at "
7286 7304 "1.5 Gbps");
7287 7305 break;
7288 7306 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7289 7307 (void) sprintf(prev, ", was online at "
7290 7308 "3.0 Gbps");
7291 7309 break;
7292 7310 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7293 7311 (void) sprintf(prev, ", was online at "
7294 7312 "6.0 Gbps");
7295 7313 break;
7296 7314 case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7297 7315 (void) sprintf(prev, ", was online at "
7298 7316 "12.0 Gbps");
7299 7317 break;
7300 7318 default:
7301 7319 break;
7302 7320 }
7303 7321 (void) sprintf(&string[strlen(string)], "link "
7304 7322 "changed, ");
7305 7323 break;
7306 7324 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7307 7325 continue;
7308 7326 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7309 7327 (void) sprintf(&string[strlen(string)],
7310 7328 "target not responding, delaying "
7311 7329 "removal");
7312 7330 break;
7313 7331 }
7314 7332 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7315 7333 mpt->m_instance, phy, dev_handle, string, curr,
7316 7334 prev));
7317 7335 }
7318 7336 if (topo_head != NULL) {
7319 7337 /*
7320 7338 * Launch DR taskq to handle topology change
7321 7339 */
7322 7340 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7323 7341 mptsas_handle_dr, (void *)topo_head,
7324 7342 DDI_NOSLEEP)) != DDI_SUCCESS) {
7325 7343 while (topo_head != NULL) {
7326 7344 topo_node = topo_head;
7327 7345 topo_head = topo_head->next;
7328 7346 kmem_free(topo_node,
7329 7347 sizeof (mptsas_topo_change_list_t));
7330 7348 }
7331 7349 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7332 7350 "for handle SAS DR event failed. \n");
7333 7351 }
7334 7352 }
7335 7353 break;
7336 7354 }
7337 7355 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7338 7356 {
7339 7357 Mpi2EventDataIrConfigChangeList_t *irChangeList;
7340 7358 mptsas_topo_change_list_t *topo_head = NULL;
7341 7359 mptsas_topo_change_list_t *topo_tail = NULL;
7342 7360 mptsas_topo_change_list_t *topo_node = NULL;
7343 7361 mptsas_target_t *ptgt;
7344 7362 uint8_t num_entries, i, reason;
7345 7363 uint16_t volhandle, diskhandle;
7346 7364
7347 7365 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7348 7366 eventreply->EventData;
7349 7367 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7350 7368 &irChangeList->NumElements);
7351 7369
7352 7370 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7353 7371 mpt->m_instance));
7354 7372
7355 7373 for (i = 0; i < num_entries; i++) {
7356 7374 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7357 7375 &irChangeList->ConfigElement[i].ReasonCode);
7358 7376 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7359 7377 &irChangeList->ConfigElement[i].VolDevHandle);
7360 7378 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7361 7379 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7362 7380
7363 7381 switch (reason) {
7364 7382 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7365 7383 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7366 7384 {
7367 7385 NDBG20(("mptsas %d volume added\n",
7368 7386 mpt->m_instance));
7369 7387
7370 7388 topo_node = kmem_zalloc(
7371 7389 sizeof (mptsas_topo_change_list_t),
7372 7390 KM_SLEEP);
7373 7391
7374 7392 topo_node->mpt = mpt;
7375 7393 topo_node->event =
7376 7394 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7377 7395 topo_node->un.physport = 0xff;
7378 7396 topo_node->devhdl = volhandle;
7379 7397 topo_node->flags =
7380 7398 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7381 7399 topo_node->object = NULL;
7382 7400 if (topo_head == NULL) {
7383 7401 topo_head = topo_tail = topo_node;
7384 7402 } else {
7385 7403 topo_tail->next = topo_node;
7386 7404 topo_tail = topo_node;
7387 7405 }
7388 7406 break;
7389 7407 }
7390 7408 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7391 7409 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7392 7410 {
7393 7411 NDBG20(("mptsas %d volume deleted\n",
7394 7412 mpt->m_instance));
7395 7413 ptgt = refhash_linear_search(mpt->m_targets,
7396 7414 mptsas_target_eval_devhdl, &volhandle);
7397 7415 if (ptgt == NULL)
7398 7416 break;
7399 7417
7400 7418 /*
7401 7419 * Clear any flags related to volume
7402 7420 */
7403 7421 (void) mptsas_delete_volume(mpt, volhandle);
7404 7422
7405 7423 /*
7406 7424 * Update DR flag immediately avoid I/O failure
7407 7425 */
7408 7426 mutex_enter(&mpt->m_tx_waitq_mutex);
7409 7427 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7410 7428 mutex_exit(&mpt->m_tx_waitq_mutex);
7411 7429
7412 7430 topo_node = kmem_zalloc(
7413 7431 sizeof (mptsas_topo_change_list_t),
7414 7432 KM_SLEEP);
7415 7433 topo_node->mpt = mpt;
7416 7434 topo_node->un.phymask =
7417 7435 ptgt->m_addr.mta_phymask;
7418 7436 topo_node->event =
7419 7437 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7420 7438 topo_node->devhdl = volhandle;
7421 7439 topo_node->flags =
7422 7440 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7423 7441 topo_node->object = (void *)ptgt;
7424 7442 if (topo_head == NULL) {
7425 7443 topo_head = topo_tail = topo_node;
7426 7444 } else {
7427 7445 topo_tail->next = topo_node;
7428 7446 topo_tail = topo_node;
7429 7447 }
7430 7448 break;
7431 7449 }
7432 7450 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7433 7451 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7434 7452 {
7435 7453 ptgt = refhash_linear_search(mpt->m_targets,
7436 7454 mptsas_target_eval_devhdl, &diskhandle);
7437 7455 if (ptgt == NULL)
7438 7456 break;
7439 7457
7440 7458 /*
7441 7459 * Update DR flag immediately avoid I/O failure
7442 7460 */
7443 7461 mutex_enter(&mpt->m_tx_waitq_mutex);
7444 7462 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7445 7463 mutex_exit(&mpt->m_tx_waitq_mutex);
7446 7464
7447 7465 topo_node = kmem_zalloc(
7448 7466 sizeof (mptsas_topo_change_list_t),
7449 7467 KM_SLEEP);
7450 7468 topo_node->mpt = mpt;
7451 7469 topo_node->un.phymask =
7452 7470 ptgt->m_addr.mta_phymask;
7453 7471 topo_node->event =
7454 7472 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7455 7473 topo_node->devhdl = diskhandle;
7456 7474 topo_node->flags =
7457 7475 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7458 7476 topo_node->object = (void *)ptgt;
7459 7477 if (topo_head == NULL) {
7460 7478 topo_head = topo_tail = topo_node;
7461 7479 } else {
7462 7480 topo_tail->next = topo_node;
7463 7481 topo_tail = topo_node;
7464 7482 }
7465 7483 break;
7466 7484 }
7467 7485 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7468 7486 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7469 7487 {
7470 7488 /*
7471 7489 * The physical drive is released by a IR
7472 7490 * volume. But we cannot get the the physport
7473 7491 * or phynum from the event data, so we only
7474 7492 * can get the physport/phynum after SAS
7475 7493 * Device Page0 request for the devhdl.
7476 7494 */
7477 7495 topo_node = kmem_zalloc(
7478 7496 sizeof (mptsas_topo_change_list_t),
7479 7497 KM_SLEEP);
7480 7498 topo_node->mpt = mpt;
7481 7499 topo_node->un.phymask = 0;
7482 7500 topo_node->event =
7483 7501 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7484 7502 topo_node->devhdl = diskhandle;
7485 7503 topo_node->flags =
7486 7504 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7487 7505 topo_node->object = NULL;
7488 7506 mpt->m_port_chng = 1;
7489 7507 if (topo_head == NULL) {
7490 7508 topo_head = topo_tail = topo_node;
7491 7509 } else {
7492 7510 topo_tail->next = topo_node;
7493 7511 topo_tail = topo_node;
7494 7512 }
7495 7513 break;
7496 7514 }
7497 7515 default:
7498 7516 break;
7499 7517 }
7500 7518 }
7501 7519
7502 7520 if (topo_head != NULL) {
7503 7521 /*
7504 7522 * Launch DR taskq to handle topology change
7505 7523 */
7506 7524 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7507 7525 mptsas_handle_dr, (void *)topo_head,
7508 7526 DDI_NOSLEEP)) != DDI_SUCCESS) {
7509 7527 while (topo_head != NULL) {
7510 7528 topo_node = topo_head;
7511 7529 topo_head = topo_head->next;
7512 7530 kmem_free(topo_node,
7513 7531 sizeof (mptsas_topo_change_list_t));
7514 7532 }
7515 7533 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7516 7534 "for handle SAS DR event failed. \n");
7517 7535 }
7518 7536 }
7519 7537 break;
7520 7538 }
7521 7539 default:
7522 7540 return (DDI_FAILURE);
7523 7541 }
7524 7542
7525 7543 return (DDI_SUCCESS);
7526 7544 }
7527 7545
7528 7546 /*
7529 7547 * handle events from ioc
7530 7548 */
7531 7549 static void
7532 7550 mptsas_handle_event(void *args)
7533 7551 {
7534 7552 m_replyh_arg_t *replyh_arg;
7535 7553 pMpi2EventNotificationReply_t eventreply;
7536 7554 uint32_t event, iocloginfo, rfm;
7537 7555 uint32_t status;
7538 7556 uint8_t port;
7539 7557 mptsas_t *mpt;
7540 7558 uint_t iocstatus;
7541 7559
7542 7560 replyh_arg = (m_replyh_arg_t *)args;
7543 7561 rfm = replyh_arg->rfm;
7544 7562 mpt = replyh_arg->mpt;
7545 7563
7546 7564 mutex_enter(&mpt->m_mutex);
7547 7565 /*
7548 7566 * If HBA is being reset, drop incoming event.
7549 7567 */
7550 7568 if (mpt->m_in_reset) {
7551 7569 NDBG20(("dropping event received prior to reset"));
7552 7570 mutex_exit(&mpt->m_mutex);
7553 7571 return;
7554 7572 }
7555 7573
7556 7574 eventreply = (pMpi2EventNotificationReply_t)
7557 7575 (mpt->m_reply_frame + (rfm -
7558 7576 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7559 7577 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7560 7578
7561 7579 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7562 7580 &eventreply->IOCStatus)) {
7563 7581 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7564 7582 mptsas_log(mpt, CE_WARN,
7565 7583 "!mptsas_handle_event: IOCStatus=0x%x, "
7566 7584 "IOCLogInfo=0x%x", iocstatus,
7567 7585 ddi_get32(mpt->m_acc_reply_frame_hdl,
7568 7586 &eventreply->IOCLogInfo));
7569 7587 } else {
7570 7588 mptsas_log(mpt, CE_WARN,
7571 7589 "mptsas_handle_event: IOCStatus=0x%x, "
7572 7590 "IOCLogInfo=0x%x", iocstatus,
7573 7591 ddi_get32(mpt->m_acc_reply_frame_hdl,
7574 7592 &eventreply->IOCLogInfo));
7575 7593 }
7576 7594 }
7577 7595
7578 7596 /*
7579 7597 * figure out what kind of event we got and handle accordingly
7580 7598 */
7581 7599 switch (event) {
7582 7600 case MPI2_EVENT_LOG_ENTRY_ADDED:
7583 7601 break;
7584 7602 case MPI2_EVENT_LOG_DATA:
7585 7603 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7586 7604 &eventreply->IOCLogInfo);
7587 7605 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7588 7606 iocloginfo));
7589 7607 break;
7590 7608 case MPI2_EVENT_STATE_CHANGE:
7591 7609 NDBG20(("mptsas%d state change.", mpt->m_instance));
7592 7610 break;
7593 7611 case MPI2_EVENT_HARD_RESET_RECEIVED:
7594 7612 NDBG20(("mptsas%d event change.", mpt->m_instance));
7595 7613 break;
7596 7614 case MPI2_EVENT_SAS_DISCOVERY:
7597 7615 {
7598 7616 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7599 7617 char string[80];
7600 7618 uint8_t rc;
7601 7619
7602 7620 sasdiscovery =
7603 7621 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7604 7622
7605 7623 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7606 7624 &sasdiscovery->ReasonCode);
7607 7625 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7608 7626 &sasdiscovery->PhysicalPort);
7609 7627 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7610 7628 &sasdiscovery->DiscoveryStatus);
7611 7629
7612 7630 string[0] = 0;
7613 7631 switch (rc) {
7614 7632 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7615 7633 (void) sprintf(string, "STARTING");
7616 7634 break;
7617 7635 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7618 7636 (void) sprintf(string, "COMPLETED");
7619 7637 break;
7620 7638 default:
7621 7639 (void) sprintf(string, "UNKNOWN");
7622 7640 break;
7623 7641 }
7624 7642
7625 7643 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7626 7644 port, status));
7627 7645
7628 7646 break;
7629 7647 }
7630 7648 case MPI2_EVENT_EVENT_CHANGE:
7631 7649 NDBG20(("mptsas%d event change.", mpt->m_instance));
7632 7650 break;
7633 7651 case MPI2_EVENT_TASK_SET_FULL:
7634 7652 {
7635 7653 pMpi2EventDataTaskSetFull_t taskfull;
7636 7654
7637 7655 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7638 7656
7639 7657 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7640 7658 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7641 7659 &taskfull->CurrentDepth)));
7642 7660 break;
7643 7661 }
7644 7662 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7645 7663 {
7646 7664 /*
7647 7665 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7648 7666 * in mptsas_handle_event_sync() of interrupt context
7649 7667 */
7650 7668 break;
7651 7669 }
7652 7670 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7653 7671 {
7654 7672 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7655 7673 uint8_t rc;
7656 7674 char string[80];
7657 7675
7658 7676 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7659 7677 eventreply->EventData;
7660 7678
7661 7679 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7662 7680 &encstatus->ReasonCode);
7663 7681 switch (rc) {
7664 7682 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7665 7683 (void) sprintf(string, "added");
7666 7684 break;
7667 7685 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7668 7686 (void) sprintf(string, ", not responding");
7669 7687 break;
7670 7688 default:
7671 7689 break;
7672 7690 }
7673 7691 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure "
7674 7692 "%x%s\n", mpt->m_instance,
7675 7693 ddi_get16(mpt->m_acc_reply_frame_hdl,
7676 7694 &encstatus->EnclosureHandle), string));
7677 7695 break;
7678 7696 }
7679 7697
7680 7698 /*
7681 7699 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7682 7700 * mptsas_handle_event_sync,in here just send ack message.
7683 7701 */
7684 7702 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7685 7703 {
7686 7704 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7687 7705 uint8_t rc;
7688 7706 uint16_t devhdl;
7689 7707 uint64_t wwn = 0;
7690 7708 uint32_t wwn_lo, wwn_hi;
7691 7709
7692 7710 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7693 7711 eventreply->EventData;
7694 7712 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7695 7713 &statuschange->ReasonCode);
7696 7714 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7697 7715 (uint32_t *)(void *)&statuschange->SASAddress);
7698 7716 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7699 7717 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7700 7718 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7701 7719 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7702 7720 &statuschange->DevHandle);
7703 7721
7704 7722 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7705 7723 wwn));
7706 7724
7707 7725 switch (rc) {
7708 7726 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7709 7727 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7710 7728 ddi_get8(mpt->m_acc_reply_frame_hdl,
7711 7729 &statuschange->ASC),
7712 7730 ddi_get8(mpt->m_acc_reply_frame_hdl,
7713 7731 &statuschange->ASCQ)));
7714 7732 break;
7715 7733
7716 7734 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7717 7735 NDBG20(("Device not supported"));
7718 7736 break;
7719 7737
7720 7738 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7721 7739 NDBG20(("IOC internally generated the Target Reset "
7722 7740 "for devhdl:%x", devhdl));
7723 7741 break;
7724 7742
7725 7743 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7726 7744 NDBG20(("IOC's internally generated Target Reset "
7727 7745 "completed for devhdl:%x", devhdl));
7728 7746 break;
7729 7747
7730 7748 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7731 7749 NDBG20(("IOC internally generated Abort Task"));
7732 7750 break;
7733 7751
7734 7752 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7735 7753 NDBG20(("IOC's internally generated Abort Task "
7736 7754 "completed"));
7737 7755 break;
7738 7756
7739 7757 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7740 7758 NDBG20(("IOC internally generated Abort Task Set"));
7741 7759 break;
7742 7760
7743 7761 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7744 7762 NDBG20(("IOC internally generated Clear Task Set"));
7745 7763 break;
7746 7764
7747 7765 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7748 7766 NDBG20(("IOC internally generated Query Task"));
7749 7767 break;
7750 7768
7751 7769 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7752 7770 NDBG20(("Device sent an Asynchronous Notification"));
7753 7771 break;
7754 7772
7755 7773 default:
7756 7774 break;
7757 7775 }
7758 7776 break;
7759 7777 }
7760 7778 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7761 7779 {
7762 7780 /*
7763 7781 * IR TOPOLOGY CHANGE LIST Event has already been handled
7764 7782 * in mpt_handle_event_sync() of interrupt context
7765 7783 */
7766 7784 break;
7767 7785 }
7768 7786 case MPI2_EVENT_IR_OPERATION_STATUS:
7769 7787 {
7770 7788 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7771 7789 char reason_str[80];
7772 7790 uint8_t rc, percent;
7773 7791 uint16_t handle;
7774 7792
7775 7793 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7776 7794 eventreply->EventData;
7777 7795 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7778 7796 &irOpStatus->RAIDOperation);
7779 7797 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7780 7798 &irOpStatus->PercentComplete);
7781 7799 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7782 7800 &irOpStatus->VolDevHandle);
7783 7801
7784 7802 switch (rc) {
7785 7803 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7786 7804 (void) sprintf(reason_str, "resync");
7787 7805 break;
7788 7806 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7789 7807 (void) sprintf(reason_str, "online capacity "
7790 7808 "expansion");
7791 7809 break;
7792 7810 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7793 7811 (void) sprintf(reason_str, "consistency check");
7794 7812 break;
7795 7813 default:
7796 7814 (void) sprintf(reason_str, "unknown reason %x",
7797 7815 rc);
7798 7816 }
7799 7817
7800 7818 NDBG20(("mptsas%d raid operational status: (%s)"
7801 7819 "\thandle(0x%04x), percent complete(%d)\n",
7802 7820 mpt->m_instance, reason_str, handle, percent));
7803 7821 break;
7804 7822 }
7805 7823 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7806 7824 {
7807 7825 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7808 7826 uint8_t phy_num;
7809 7827 uint8_t primitive;
7810 7828
7811 7829 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7812 7830 eventreply->EventData;
7813 7831
7814 7832 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7815 7833 &sas_broadcast->PhyNum);
7816 7834 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7817 7835 &sas_broadcast->Primitive);
7818 7836
7819 7837 switch (primitive) {
7820 7838 case MPI2_EVENT_PRIMITIVE_CHANGE:
7821 7839 mptsas_smhba_log_sysevent(mpt,
7822 7840 ESC_SAS_HBA_PORT_BROADCAST,
7823 7841 SAS_PORT_BROADCAST_CHANGE,
7824 7842 &mpt->m_phy_info[phy_num].smhba_info);
7825 7843 break;
7826 7844 case MPI2_EVENT_PRIMITIVE_SES:
7827 7845 mptsas_smhba_log_sysevent(mpt,
7828 7846 ESC_SAS_HBA_PORT_BROADCAST,
7829 7847 SAS_PORT_BROADCAST_SES,
7830 7848 &mpt->m_phy_info[phy_num].smhba_info);
7831 7849 break;
7832 7850 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7833 7851 mptsas_smhba_log_sysevent(mpt,
7834 7852 ESC_SAS_HBA_PORT_BROADCAST,
7835 7853 SAS_PORT_BROADCAST_D01_4,
7836 7854 &mpt->m_phy_info[phy_num].smhba_info);
7837 7855 break;
7838 7856 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7839 7857 mptsas_smhba_log_sysevent(mpt,
7840 7858 ESC_SAS_HBA_PORT_BROADCAST,
7841 7859 SAS_PORT_BROADCAST_D04_7,
7842 7860 &mpt->m_phy_info[phy_num].smhba_info);
7843 7861 break;
7844 7862 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7845 7863 mptsas_smhba_log_sysevent(mpt,
7846 7864 ESC_SAS_HBA_PORT_BROADCAST,
7847 7865 SAS_PORT_BROADCAST_D16_7,
7848 7866 &mpt->m_phy_info[phy_num].smhba_info);
7849 7867 break;
7850 7868 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7851 7869 mptsas_smhba_log_sysevent(mpt,
7852 7870 ESC_SAS_HBA_PORT_BROADCAST,
7853 7871 SAS_PORT_BROADCAST_D29_7,
7854 7872 &mpt->m_phy_info[phy_num].smhba_info);
7855 7873 break;
7856 7874 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7857 7875 mptsas_smhba_log_sysevent(mpt,
7858 7876 ESC_SAS_HBA_PORT_BROADCAST,
7859 7877 SAS_PORT_BROADCAST_D24_0,
7860 7878 &mpt->m_phy_info[phy_num].smhba_info);
7861 7879 break;
7862 7880 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7863 7881 mptsas_smhba_log_sysevent(mpt,
7864 7882 ESC_SAS_HBA_PORT_BROADCAST,
7865 7883 SAS_PORT_BROADCAST_D27_4,
7866 7884 &mpt->m_phy_info[phy_num].smhba_info);
7867 7885 break;
7868 7886 default:
7869 7887 NDBG16(("mptsas%d: unknown BROADCAST PRIMITIVE"
7870 7888 " %x received",
7871 7889 mpt->m_instance, primitive));
7872 7890 break;
7873 7891 }
7874 7892 NDBG16(("mptsas%d sas broadcast primitive: "
7875 7893 "\tprimitive(0x%04x), phy(%d) complete\n",
7876 7894 mpt->m_instance, primitive, phy_num));
7877 7895 break;
7878 7896 }
7879 7897 case MPI2_EVENT_IR_VOLUME:
7880 7898 {
7881 7899 Mpi2EventDataIrVolume_t *irVolume;
7882 7900 uint16_t devhandle;
7883 7901 uint32_t state;
7884 7902 int config, vol;
7885 7903 uint8_t found = FALSE;
7886 7904
7887 7905 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7888 7906 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7889 7907 &irVolume->NewValue);
7890 7908 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7891 7909 &irVolume->VolDevHandle);
7892 7910
7893 7911 NDBG20(("EVENT_IR_VOLUME event is received"));
7894 7912
7895 7913 /*
7896 7914 * Get latest RAID info and then find the DevHandle for this
7897 7915 * event in the configuration. If the DevHandle is not found
7898 7916 * just exit the event.
7899 7917 */
7900 7918 (void) mptsas_get_raid_info(mpt);
7901 7919 for (config = 0; (config < mpt->m_num_raid_configs) &&
7902 7920 (!found); config++) {
7903 7921 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7904 7922 if (mpt->m_raidconfig[config].m_raidvol[vol].
7905 7923 m_raidhandle == devhandle) {
7906 7924 found = TRUE;
7907 7925 break;
7908 7926 }
7909 7927 }
7910 7928 }
7911 7929 if (!found) {
7912 7930 break;
7913 7931 }
7914 7932
7915 7933 switch (irVolume->ReasonCode) {
7916 7934 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7917 7935 {
7918 7936 uint32_t i;
7919 7937 mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
7920 7938 state;
7921 7939
7922 7940 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7923 7941 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7924 7942 ", auto-config of hot-swap drives is %s"
7925 7943 ", write caching is %s"
7926 7944 ", hot-spare pool mask is %02x\n",
7927 7945 vol, state &
7928 7946 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7929 7947 ? "disabled" : "enabled",
7930 7948 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7931 7949 ? "controlled by member disks" :
7932 7950 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7933 7951 ? "disabled" :
7934 7952 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7935 7953 ? "enabled" :
7936 7954 "incorrectly set",
7937 7955 (state >> 16) & 0xff);
7938 7956 break;
7939 7957 }
7940 7958 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7941 7959 {
7942 7960 mpt->m_raidconfig[config].m_raidvol[vol].m_state =
7943 7961 (uint8_t)state;
7944 7962
7945 7963 mptsas_log(mpt, CE_NOTE,
7946 7964 "Volume %d is now %s\n", vol,
7947 7965 state == MPI2_RAID_VOL_STATE_OPTIMAL
7948 7966 ? "optimal" :
7949 7967 state == MPI2_RAID_VOL_STATE_DEGRADED
7950 7968 ? "degraded" :
7951 7969 state == MPI2_RAID_VOL_STATE_ONLINE
7952 7970 ? "online" :
7953 7971 state == MPI2_RAID_VOL_STATE_INITIALIZING
7954 7972 ? "initializing" :
7955 7973 state == MPI2_RAID_VOL_STATE_FAILED
7956 7974 ? "failed" :
7957 7975 state == MPI2_RAID_VOL_STATE_MISSING
7958 7976 ? "missing" :
7959 7977 "state unknown");
7960 7978 break;
7961 7979 }
7962 7980 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7963 7981 {
7964 7982 mpt->m_raidconfig[config].m_raidvol[vol].
7965 7983 m_statusflags = state;
7966 7984
7967 7985 mptsas_log(mpt, CE_NOTE,
7968 7986 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7969 7987 vol,
7970 7988 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7971 7989 ? ", enabled" : ", disabled",
7972 7990 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7973 7991 ? ", quiesced" : "",
7974 7992 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7975 7993 ? ", inactive" : ", active",
7976 7994 state &
7977 7995 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7978 7996 ? ", bad block table is full" : "",
7979 7997 state &
7980 7998 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7981 7999 ? ", resync in progress" : "",
7982 8000 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7983 8001 ? ", background initialization in progress" : "",
7984 8002 state &
7985 8003 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7986 8004 ? ", capacity expansion in progress" : "",
7987 8005 state &
7988 8006 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7989 8007 ? ", consistency check in progress" : "",
7990 8008 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7991 8009 ? ", data scrub in progress" : "");
7992 8010 break;
7993 8011 }
7994 8012 default:
7995 8013 break;
7996 8014 }
7997 8015 break;
7998 8016 }
7999 8017 case MPI2_EVENT_IR_PHYSICAL_DISK:
8000 8018 {
8001 8019 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
8002 8020 uint16_t devhandle, enchandle, slot;
8003 8021 uint32_t status, state;
8004 8022 uint8_t physdisknum, reason;
8005 8023
8006 8024 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
8007 8025 eventreply->EventData;
8008 8026 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
8009 8027 &irPhysDisk->PhysDiskNum);
8010 8028 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8011 8029 &irPhysDisk->PhysDiskDevHandle);
8012 8030 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8013 8031 &irPhysDisk->EnclosureHandle);
8014 8032 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
8015 8033 &irPhysDisk->Slot);
8016 8034 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
8017 8035 &irPhysDisk->NewValue);
8018 8036 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8019 8037 &irPhysDisk->ReasonCode);
8020 8038
8021 8039 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
8022 8040
8023 8041 switch (reason) {
8024 8042 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
8025 8043 mptsas_log(mpt, CE_NOTE,
8026 8044 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8027 8045 "for enclosure with handle 0x%x is now in hot "
8028 8046 "spare pool %d",
8029 8047 physdisknum, devhandle, slot, enchandle,
8030 8048 (state >> 16) & 0xff);
8031 8049 break;
8032 8050
8033 8051 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
8034 8052 status = state;
8035 8053 mptsas_log(mpt, CE_NOTE,
8036 8054 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8037 8055 "for enclosure with handle 0x%x is now "
8038 8056 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
8039 8057 enchandle,
8040 8058 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
8041 8059 ? ", inactive" : ", active",
8042 8060 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
8043 8061 ? ", out of sync" : "",
8044 8062 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
8045 8063 ? ", quiesced" : "",
8046 8064 status &
8047 8065 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
8048 8066 ? ", write cache enabled" : "",
8049 8067 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
8050 8068 ? ", capacity expansion target" : "");
8051 8069 break;
8052 8070
8053 8071 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
8054 8072 mptsas_log(mpt, CE_NOTE,
8055 8073 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8056 8074 "for enclosure with handle 0x%x is now %s\n",
8057 8075 physdisknum, devhandle, slot, enchandle,
8058 8076 state == MPI2_RAID_PD_STATE_OPTIMAL
8059 8077 ? "optimal" :
8060 8078 state == MPI2_RAID_PD_STATE_REBUILDING
8061 8079 ? "rebuilding" :
8062 8080 state == MPI2_RAID_PD_STATE_DEGRADED
8063 8081 ? "degraded" :
8064 8082 state == MPI2_RAID_PD_STATE_HOT_SPARE
8065 8083 ? "a hot spare" :
8066 8084 state == MPI2_RAID_PD_STATE_ONLINE
8067 8085 ? "online" :
8068 8086 state == MPI2_RAID_PD_STATE_OFFLINE
8069 8087 ? "offline" :
8070 8088 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
8071 8089 ? "not compatible" :
8072 8090 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
8073 8091 ? "not configured" :
8074 8092 "state unknown");
8075 8093 break;
8076 8094 }
8077 8095 break;
8078 8096 }
8079 8097 default:
8080 8098 NDBG20(("mptsas%d: unknown event %x received",
8081 8099 mpt->m_instance, event));
8082 8100 break;
8083 8101 }
8084 8102
8085 8103 /*
8086 8104 * Return the reply frame to the free queue.
8087 8105 */
8088 8106 ddi_put32(mpt->m_acc_free_queue_hdl,
8089 8107 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
8090 8108 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
8091 8109 DDI_DMA_SYNC_FORDEV);
8092 8110 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
8093 8111 mpt->m_free_index = 0;
8094 8112 }
8095 8113 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
8096 8114 mpt->m_free_index);
8097 8115 mutex_exit(&mpt->m_mutex);
8098 8116 }
8099 8117
8100 8118 /*
8101 8119 * invoked from timeout() to restart qfull cmds with throttle == 0
8102 8120 */
8103 8121 static void
8104 8122 mptsas_restart_cmd(void *arg)
8105 8123 {
8106 8124 mptsas_t *mpt = arg;
8107 8125 mptsas_target_t *ptgt = NULL;
8108 8126
8109 8127 mutex_enter(&mpt->m_mutex);
8110 8128
8111 8129 mpt->m_restart_cmd_timeid = 0;
8112 8130
8113 8131 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8114 8132 ptgt = refhash_next(mpt->m_targets, ptgt)) {
8115 8133 if (ptgt->m_reset_delay == 0) {
8116 8134 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
8117 8135 mptsas_set_throttle(mpt, ptgt,
8118 8136 MAX_THROTTLE);
8119 8137 }
8120 8138 }
8121 8139 }
8122 8140 mptsas_restart_hba(mpt);
8123 8141 mutex_exit(&mpt->m_mutex);
8124 8142 }
8125 8143
8126 8144 void
8127 8145 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8128 8146 {
8129 8147 int slot;
8130 8148 mptsas_slots_t *slots = mpt->m_active;
8131 8149 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8132 8150
8133 8151 ASSERT(cmd != NULL);
8134 8152 ASSERT(cmd->cmd_queued == FALSE);
8135 8153
8136 8154 /*
8137 8155 * Task Management cmds are removed in their own routines. Also,
8138 8156 * we don't want to modify timeout based on TM cmds.
8139 8157 */
8140 8158 if (cmd->cmd_flags & CFLAG_TM_CMD) {
8141 8159 return;
8142 8160 }
8143 8161
8144 8162 slot = cmd->cmd_slot;
8145 8163
8146 8164 /*
8147 8165 * remove the cmd.
8148 8166 */
8149 8167 if (cmd == slots->m_slot[slot]) {
8150 8168 NDBG31(("mptsas_remove_cmd: removing cmd=0x%p, flags "
8151 8169 "0x%x", (void *)cmd, cmd->cmd_flags));
8152 8170 slots->m_slot[slot] = NULL;
8153 8171 mpt->m_ncmds--;
8154 8172
8155 8173 /*
8156 8174 * only decrement per target ncmds if command
8157 8175 * has a target associated with it.
8158 8176 */
8159 8177 if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
8160 8178 ptgt->m_t_ncmds--;
8161 8179 /*
8162 8180 * reset throttle if we just ran an untagged command
8163 8181 * to a tagged target
8164 8182 */
8165 8183 if ((ptgt->m_t_ncmds == 0) &&
8166 8184 ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
8167 8185 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8168 8186 }
8169 8187
8170 8188 /*
8171 8189 * Remove this command from the active queue.
8172 8190 */
8173 8191 if (cmd->cmd_active_expiration != 0) {
8174 8192 TAILQ_REMOVE(&ptgt->m_active_cmdq, cmd,
8175 8193 cmd_active_link);
8176 8194 cmd->cmd_active_expiration = 0;
8177 8195 }
8178 8196 }
8179 8197 }
8180 8198
8181 8199 /*
8182 8200 * This is all we need to do for ioc commands.
8183 8201 */
8184 8202 if (cmd->cmd_flags & CFLAG_CMDIOC) {
8185 8203 mptsas_return_to_pool(mpt, cmd);
8186 8204 return;
8187 8205 }
8188 8206
8189 8207 ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
8190 8208 }
8191 8209
8192 8210 /*
8193 8211 * accept all cmds on the tx_waitq if any and then
8194 8212 * start a fresh request from the top of the device queue.
8195 8213 *
 * Since cmds are usually queued on the tx_waitq and only rarely on the
 * instance waitq, this function should not be invoked in the ISR;
 * mptsas_restart_waitq() is invoked in the ISR instead.  Otherwise the
 * burden belonging to the I/O dispatch CPUs would be moved to the
 * interrupt CPU.
8200 8218 */
static void
mptsas_restart_hba(mptsas_t *mpt)
{
	/* Caller must hold m_mutex; m_tx_waitq_mutex is acquired second. */
	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Drain any cmds parked on the tx_waitq into the main waitq first so
	 * submission order is preserved, then restart the main waitq.
	 */
	mutex_enter(&mpt->m_tx_waitq_mutex);
	if (mpt->m_tx_waitq) {
		mptsas_accept_tx_waitq(mpt);
	}
	mutex_exit(&mpt->m_tx_waitq_mutex);
	mptsas_restart_waitq(mpt);
}
8213 8231
8214 8232 /*
8215 8233 * start a fresh request from the top of the device queue
8216 8234 */
8217 8235 static void
8218 8236 mptsas_restart_waitq(mptsas_t *mpt)
8219 8237 {
8220 8238 mptsas_cmd_t *cmd, *next_cmd;
8221 8239 mptsas_target_t *ptgt = NULL;
8222 8240
8223 8241 NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
8224 8242
8225 8243 ASSERT(mutex_owned(&mpt->m_mutex));
8226 8244
8227 8245 /*
8228 8246 * If there is a reset delay, don't start any cmds. Otherwise, start
8229 8247 * as many cmds as possible.
8230 8248 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
8231 8249 * commands is m_max_requests - 2.
8232 8250 */
8233 8251 cmd = mpt->m_waitq;
8234 8252
8235 8253 while (cmd != NULL) {
8236 8254 next_cmd = cmd->cmd_linkp;
8237 8255 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
8238 8256 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8239 8257 /*
8240 8258 * passthru command get slot need
8241 8259 * set CFLAG_PREPARED.
8242 8260 */
8243 8261 cmd->cmd_flags |= CFLAG_PREPARED;
8244 8262 mptsas_waitq_delete(mpt, cmd);
8245 8263 mptsas_start_passthru(mpt, cmd);
8246 8264 }
8247 8265 cmd = next_cmd;
8248 8266 continue;
8249 8267 }
8250 8268 if (cmd->cmd_flags & CFLAG_CONFIG) {
8251 8269 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8252 8270 /*
8253 8271 * Send the config page request and delete it
8254 8272 * from the waitq.
8255 8273 */
8256 8274 cmd->cmd_flags |= CFLAG_PREPARED;
8257 8275 mptsas_waitq_delete(mpt, cmd);
8258 8276 mptsas_start_config_page_access(mpt, cmd);
8259 8277 }
8260 8278 cmd = next_cmd;
8261 8279 continue;
8262 8280 }
8263 8281 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
8264 8282 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8265 8283 /*
8266 8284 * Send the FW Diag request and delete if from
8267 8285 * the waitq.
8268 8286 */
8269 8287 cmd->cmd_flags |= CFLAG_PREPARED;
8270 8288 mptsas_waitq_delete(mpt, cmd);
8271 8289 mptsas_start_diag(mpt, cmd);
8272 8290 }
8273 8291 cmd = next_cmd;
8274 8292 continue;
8275 8293 }
8276 8294
8277 8295 ptgt = cmd->cmd_tgt_addr;
8278 8296 if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
8279 8297 (ptgt->m_t_ncmds == 0)) {
8280 8298 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8281 8299 }
8282 8300 if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
8283 8301 (ptgt && (ptgt->m_reset_delay == 0)) &&
8284 8302 (ptgt && (ptgt->m_t_ncmds <
8285 8303 ptgt->m_t_throttle))) {
8286 8304 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8287 8305 mptsas_waitq_delete(mpt, cmd);
8288 8306 (void) mptsas_start_cmd(mpt, cmd);
8289 8307 }
8290 8308 }
8291 8309 cmd = next_cmd;
8292 8310 }
8293 8311 }
8294 8312 /*
 * Cmds are queued if tran_start() doesn't get the m_mutex lock (no wait).
 * Accept all those queued cmds before a new cmd is accepted so that the
 * cmds are sent in order.
8298 8316 */
8299 8317 static void
8300 8318 mptsas_accept_tx_waitq(mptsas_t *mpt)
8301 8319 {
8302 8320 mptsas_cmd_t *cmd;
8303 8321
8304 8322 ASSERT(mutex_owned(&mpt->m_mutex));
8305 8323 ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
8306 8324
8307 8325 /*
8308 8326 * A Bus Reset could occur at any time and flush the tx_waitq,
8309 8327 * so we cannot count on the tx_waitq to contain even one cmd.
8310 8328 * And when the m_tx_waitq_mutex is released and run
8311 8329 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8312 8330 */
8313 8331 cmd = mpt->m_tx_waitq;
8314 8332 for (;;) {
8315 8333 if ((cmd = mpt->m_tx_waitq) == NULL) {
8316 8334 mpt->m_tx_draining = 0;
8317 8335 break;
8318 8336 }
8319 8337 if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8320 8338 mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8321 8339 }
8322 8340 cmd->cmd_linkp = NULL;
8323 8341 mutex_exit(&mpt->m_tx_waitq_mutex);
8324 8342 if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8325 8343 cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8326 8344 "to accept cmd on queue\n");
8327 8345 mutex_enter(&mpt->m_tx_waitq_mutex);
8328 8346 }
8329 8347 }
8330 8348
8331 8349
8332 8350 /*
8333 8351 * mpt tag type lookup
8334 8352 */
/* Indexed by (cmd_pkt_flags & FLAG_TAGMASK) >> 12; see mptsas_start_cmd(). */
static char mptsas_tag_lookup[] =
	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8337 8355
/*
 * Build an MPI2 SCSI IO request frame for cmd in its already-assigned slot
 * (SMID), set up sense (ARQ) buffering and the SGL, sync the DMA buffers,
 * and post the request descriptor to the IOC.  Also arms the command's
 * timeout and links it into the target's expiration-ordered active queue.
 * Returns DDI_SUCCESS, or DDI_FAILURE if the cmd was requeued for draining
 * or an FMA handle check failed.
 */
static int
mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt		*pkt = CMD2PKT(cmd);
	uint32_t		control = 0;
	caddr_t			mem, arsbuf;
	pMpi2SCSIIORequest_t	io_request;
	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
	uint16_t		SMID, io_flags = 0;
	uint8_t			ars_size;
	uint64_t		request_desc;
	uint32_t		ars_dmaaddrlow;
	mptsas_cmd_t		*c;

	NDBG1(("mptsas_start_cmd: cmd=0x%p, flags 0x%x", (void *)cmd,
	    cmd->cmd_flags));

	/*
	 * Set SMID and increment index. Rollover to 1 instead of 0 if index
	 * is at the max. 0 is an invalid SMID, so we call the first index 1.
	 */
	SMID = cmd->cmd_slot;

	/*
	 * It is possible for back to back device reset to
	 * happen before the reset delay has expired. That's
	 * ok, just let the device reset go out on the bus.
	 */
	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
		ASSERT(ptgt->m_reset_delay == 0);
	}

	/*
	 * if a non-tagged cmd is submitted to an active tagged target
	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
	 * to be untagged
	 */
	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
	    (ptgt->m_t_ncmds > 1) &&
	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
			NDBG23(("target=%d, untagged cmd, start draining\n",
			    ptgt->m_devhdl));

			if (ptgt->m_reset_delay == 0) {
				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
			}

			/* Requeue at the head so it restarts first. */
			mptsas_remove_cmd(mpt, cmd);
			cmd->cmd_pkt_flags |= FLAG_HEAD;
			mptsas_waitq_add(mpt, cmd);
		}
		return (DDI_FAILURE);
	}

	/*
	 * Set correct tag bits.
	 */
	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
		    FLAG_TAGMASK) >> 12)]) {
		case MSG_SIMPLE_QTAG:
			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
			break;
		case MSG_HEAD_QTAG:
			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ORDERED_QTAG:
			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
			break;
		default:
			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
			break;
		}
	} else {
		/*
		 * Untagged (non-RQSENSE) cmd: throttle the target to one
		 * outstanding command while it is on the bus.
		 */
		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
			ptgt->m_t_throttle = 1;
		}
		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	}

	if (cmd->cmd_pkt_flags & FLAG_TLR) {
		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
	}

	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
	io_request = (pMpi2SCSIIORequest_t)mem;
	if (cmd->cmd_extrqslen != 0) {
		/*
		 * Mapping of the buffer was done in mptsas_pkt_alloc_extern().
		 * Calculate the DMA address with the same offset.
		 */
		arsbuf = cmd->cmd_arq_buf;
		ars_size = cmd->cmd_extrqslen;
		ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
		    ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
		    0xffffffffu;
	} else {
		/* Default sense buffer: per-SMID region (SMID starts at 1). */
		arsbuf = mpt->m_req_sense + (mpt->m_req_sense_size * (SMID-1));
		cmd->cmd_arq_buf = arsbuf;
		ars_size = mpt->m_req_sense_size;
		ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
		    (mpt->m_req_sense_size * (SMID-1))) &
		    0xffffffffu;
	}
	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
	bzero(arsbuf, ars_size);

	/* SGLOffset0 is expressed in 4-byte (dword) units. */
	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
	    MPI2_FUNCTION_SCSI_IO_REQUEST);

	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	io_flags = cmd->cmd_cdblen;
	if (mptsas_use_fastpath &&
	    ptgt->m_io_flags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
		io_flags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
		request_desc = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	} else {
		request_desc = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	}
	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
	/*
	 * setup the Scatter/Gather DMA list for this request
	 */
	if (cmd->cmd_cookiec > 0) {
		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
	} else {
		/* No data phase: emit a single zero-length simple SGE. */
		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
	}

	/*
	 * save ARQ information
	 */
	ddi_put8(acc_hdl, &io_request->SenseBufferLength, ars_size);
	ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress, ars_dmaaddrlow);

	ddi_put32(acc_hdl, &io_request->Control, control);

	NDBG31(("starting message=%d(0x%p), with cmd=0x%p",
	    SMID, (void *)io_request, (void *)cmd));

	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
	(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Build request descriptor and write it to the request desc post reg.
	 *
	 * NOTE(review): SMID is uint16_t and is promoted to int before the
	 * << 16; if SMID could ever be >= 0x8000 the shift would set the sign
	 * bit of an int (undefined behavior).  Presumably SMID stays well
	 * below that here — confirm against m_max_requests.
	 */
	request_desc |= (SMID << 16);
	request_desc |= (uint64_t)ptgt->m_devhdl << 48;
	MPTSAS_START_CMD(mpt, request_desc);

	/*
	 * Start timeout.
	 */
	cmd->cmd_active_expiration =
	    gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
#ifdef MPTSAS_TEST
	/*
	 * Force timeouts to happen immediately.
	 */
	if (mptsas_test_timeouts)
		cmd->cmd_active_expiration = gethrtime();
#endif
	/*
	 * Link into the target's active queue, kept sorted by descending
	 * expiration time so the head is always the latest deadline.
	 */
	c = TAILQ_FIRST(&ptgt->m_active_cmdq);
	if (c == NULL ||
	    c->cmd_active_expiration < cmd->cmd_active_expiration) {
		/*
		 * Common case is that this is the last pending expiration
		 * (or queue is empty). Insert at head of the queue.
		 */
		TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
	} else {
		/*
		 * Queue is not empty and first element expires later than
		 * this command. Search for element expiring sooner.
		 */
		while ((c = TAILQ_NEXT(c, cmd_active_link)) != NULL) {
			if (c->cmd_active_expiration <
			    cmd->cmd_active_expiration) {
				TAILQ_INSERT_BEFORE(c, cmd, cmd_active_link);
				break;
			}
		}
		if (c == NULL) {
			/*
			 * No element found expiring sooner, append to
			 * non-empty queue.
			 */
			TAILQ_INSERT_TAIL(&ptgt->m_active_cmdq, cmd,
			    cmd_active_link);
		}
	}

	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
8550 8568
8551 8569 /*
8552 8570 * Select a helper thread to handle current doneq
8553 8571 */
8554 8572 static void
8555 8573 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8556 8574 {
8557 8575 uint64_t t, i;
8558 8576 uint32_t min = 0xffffffff;
8559 8577 mptsas_doneq_thread_list_t *item;
8560 8578
8561 8579 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8562 8580 item = &mpt->m_doneq_thread_id[i];
8563 8581 /*
8564 8582 * If the completed command on help thread[i] less than
8565 8583 * doneq_thread_threshold, then pick the thread[i]. Otherwise
8566 8584 * pick a thread which has least completed command.
8567 8585 */
8568 8586
8569 8587 mutex_enter(&item->mutex);
8570 8588 if (item->len < mpt->m_doneq_thread_threshold) {
8571 8589 t = i;
8572 8590 mutex_exit(&item->mutex);
8573 8591 break;
8574 8592 }
8575 8593 if (item->len < min) {
8576 8594 min = item->len;
8577 8595 t = i;
8578 8596 }
8579 8597 mutex_exit(&item->mutex);
8580 8598 }
8581 8599 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8582 8600 mptsas_doneq_mv(mpt, t);
8583 8601 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8584 8602 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8585 8603 }
8586 8604
8587 8605 /*
8588 8606 * move the current global doneq to the doneq of thead[t]
8589 8607 */
static void
mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
{
	mptsas_cmd_t			*cmd;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	/* Caller holds thread t's list mutex (and m_mutex for the doneq). */
	ASSERT(mutex_owned(&item->mutex));
	/*
	 * Unlink each cmd from the head of the global doneq and append it to
	 * thread t's queue, keeping both tail pointers and lengths correct.
	 */
	while ((cmd = mpt->m_doneq) != NULL) {
		if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
			/* Global queue emptied; reset its tail pointer. */
			mpt->m_donetail = &mpt->m_doneq;
		}
		cmd->cmd_linkp = NULL;
		*item->donetail = cmd;
		item->donetail = &cmd->cmd_linkp;
		mpt->m_doneq_len--;
		item->len++;
	}
}
8608 8626
/*
 * FMA audit for a completed command: check every access and DMA handle the
 * command may have used.  On any failure, report DDI_SERVICE_UNAFFECTED and
 * fail the packet with CMD_TRAN_ERR so the target driver can retry.
 */
void
mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	/* Check all acc and dma handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		/* Only the config-space handle's error state is cleared. */
		ddi_fm_acc_err_clear(mpt->m_config_handle,
		    DDI_FME_VER0);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip,
		    DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Per-command data DMA handle, if the cmd moved data. */
	if (cmd->cmd_dmahandle &&
	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
	/* Extra request frames allocated for large SGLs, if any. */
	if ((cmd->cmd_extra_frames &&
	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
	    DDI_SUCCESS)))) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics = 0;
	}
}
8671 8689
8672 8690 /*
8673 8691 * These routines manipulate the queue of commands that
8674 8692 * are waiting for their completion routines to be called.
8675 8693 * The queue is usually in FIFO order but on an MP system
8676 8694 * it's possible for the completion routines to get out
8677 8695 * of order. If that's a problem you need to add a global
8678 8696 * mutex around the code that calls the completion routine
8679 8697 * in the interrupt handler.
8680 8698 */
/*
 * Append a finished command to the global doneq (if it has a completion
 * callback) after marking it finished and running the FMA handle checks.
 * Caller holds m_mutex.
 */
static void
mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	struct scsi_pkt	*pkt = CMD2PKT(cmd);

	NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));

	ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
	cmd->cmd_linkp = NULL;
	/* Flag updates must precede the FMA check, which may fail the pkt. */
	cmd->cmd_flags |= CFLAG_FINISHED;
	cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;

	mptsas_fma_check(mpt, cmd);

	/*
	 * only add scsi pkts that have completion routines to
	 * the doneq. no intr cmds do not have callbacks.
	 */
	if (pkt && (pkt->pkt_comp)) {
		*mpt->m_donetail = cmd;
		mpt->m_donetail = &cmd->cmd_linkp;
		mpt->m_doneq_len++;
	}
}
8705 8723
/*
 * Pop one command off helper thread t's done queue.  Returns NULL if the
 * queue is empty.  Caller holds the thread's list mutex.
 */
static mptsas_cmd_t *
mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
{
	mptsas_cmd_t			*cmd;
	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];

	/* pop one off the done queue */
	if ((cmd = item->doneq) != NULL) {
		/* if the queue is now empty fix the tail pointer */
		NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
		if ((item->doneq = cmd->cmd_linkp) == NULL) {
			item->donetail = &item->doneq;
		}
		cmd->cmd_linkp = NULL;
		item->len--;
	}
	return (cmd);
}
8724 8742
/*
 * Drain the global doneq, running each command's completion callback.
 * m_mutex is dropped while the callbacks run; m_in_callback guards against
 * re-entry during that window.  Caller holds m_mutex.
 */
static void
mptsas_doneq_empty(mptsas_t *mpt)
{
	if (mpt->m_doneq && !mpt->m_in_callback) {
		mptsas_cmd_t	*cmd, *next;
		struct scsi_pkt *pkt;

		mpt->m_in_callback = 1;
		/* Detach the whole list first so it is stable without locks. */
		cmd = mpt->m_doneq;
		mpt->m_doneq = NULL;
		mpt->m_donetail = &mpt->m_doneq;
		mpt->m_doneq_len = 0;

		mutex_exit(&mpt->m_mutex);
		/*
		 * run the completion routines of all the
		 * completed commands
		 */
		while (cmd != NULL) {
			next = cmd->cmd_linkp;
			cmd->cmd_linkp = NULL;
			/* run this command's completion routine */
			cmd->cmd_flags |= CFLAG_COMPLETED;
			pkt = CMD2PKT(cmd);
			mptsas_pkt_comp(pkt, cmd);
			cmd = next;
		}
		mutex_enter(&mpt->m_mutex);
		mpt->m_in_callback = 0;
	}
}
8756 8774
8757 8775 /*
8758 8776 * These routines manipulate the target's queue of pending requests
8759 8777 */
8760 8778 void
8761 8779 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8762 8780 {
8763 8781 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8764 8782 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8765 8783 cmd->cmd_queued = TRUE;
8766 8784 if (ptgt)
8767 8785 ptgt->m_t_nwait++;
8768 8786 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8769 8787 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8770 8788 mpt->m_waitqtail = &cmd->cmd_linkp;
8771 8789 }
8772 8790 mpt->m_waitq = cmd;
8773 8791 } else {
8774 8792 cmd->cmd_linkp = NULL;
8775 8793 *(mpt->m_waitqtail) = cmd;
8776 8794 mpt->m_waitqtail = &cmd->cmd_linkp;
8777 8795 }
8778 8796 }
8779 8797
/*
 * Remove and return the command at the head of the instance waitq (NULL if
 * empty), decrementing the per-target wait count.  Caller holds m_mutex.
 */
static mptsas_cmd_t *
mptsas_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t	*cmd;
	mptsas_target_t *ptgt;
	NDBG7(("mptsas_waitq_rm"));

	MPTSAS_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
	if (cmd) {
		ptgt = cmd->cmd_tgt_addr;
		if (ptgt) {
			ptgt->m_t_nwait--;
			ASSERT(ptgt->m_t_nwait >= 0);
		}
	}
	return (cmd);
}
8799 8817
8800 8818 /*
8801 8819 * remove specified cmd from the middle of the wait queue.
8802 8820 */
static void
mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_cmd_t	*prevp = mpt->m_waitq;
	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;

	NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
	    (void *)mpt, (void *)cmd));
	if (ptgt) {
		ptgt->m_t_nwait--;
		ASSERT(ptgt->m_t_nwait >= 0);
	}

	/* Fast path: cmd is at the head of the waitq. */
	if (prevp == cmd) {
		if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
			mpt->m_waitqtail = &mpt->m_waitq;

		cmd->cmd_linkp = NULL;
		cmd->cmd_queued = FALSE;
		NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
		    (void *)mpt, (void *)cmd));
		return;
	}

	/* Otherwise walk the list looking for cmd's predecessor. */
	while (prevp != NULL) {
		if (prevp->cmd_linkp == cmd) {
			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
				mpt->m_waitqtail = &prevp->cmd_linkp;

			cmd->cmd_linkp = NULL;
			cmd->cmd_queued = FALSE;
			NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
			    (void *)mpt, (void *)cmd));
			return;
		}
		prevp = prevp->cmd_linkp;
	}
	/* cmd was not on the waitq: internal inconsistency, panic. */
	cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
}
8842 8860
/*
 * Remove and return the command at the head of the tx_waitq (NULL if
 * empty).  Caller holds m_tx_waitq_mutex.
 */
static mptsas_cmd_t *
mptsas_tx_waitq_rm(mptsas_t *mpt)
{
	mptsas_cmd_t *cmd;
	NDBG7(("mptsas_tx_waitq_rm"));

	MPTSAS_TX_WAITQ_RM(mpt, cmd);

	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));

	return (cmd);
}
8855 8873
8856 8874 /*
8857 8875 * remove specified cmd from the middle of the tx_waitq.
8858 8876 */
static void
mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	mptsas_cmd_t	*prevp = mpt->m_tx_waitq;

	NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
	    (void *)mpt, (void *)cmd));

	/* Fast path: cmd is at the head of the tx_waitq. */
	if (prevp == cmd) {
		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;

		cmd->cmd_linkp = NULL;
		cmd->cmd_queued = FALSE;
		NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
		    (void *)mpt, (void *)cmd));
		return;
	}

	/* Otherwise walk the list looking for cmd's predecessor. */
	while (prevp != NULL) {
		if (prevp->cmd_linkp == cmd) {
			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
				mpt->m_tx_waitqtail = &prevp->cmd_linkp;

			cmd->cmd_linkp = NULL;
			cmd->cmd_queued = FALSE;
			NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
			    (void *)mpt, (void *)cmd));
			return;
		}
		prevp = prevp->cmd_linkp;
	}
	/* cmd was not on the tx_waitq: internal inconsistency, panic. */
	cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
}
8893 8911
8894 8912 /*
8895 8913 * device and bus reset handling
8896 8914 *
8897 8915 * Notes:
8898 8916 * - RESET_ALL: reset the controller
8899 8917 * - RESET_TARGET: reset the target specified in scsi_address
8900 8918 */
/*
 * tran_reset(9E) entry point.  Resets the target addressed by ap.  Returns
 * TRUE/FALSE as the transport layer expects.  The `level` argument is only
 * logged; the reset issued is always a target reset via
 * mptsas_do_scsi_reset().
 */
static int
mptsas_scsi_reset(struct scsi_address *ap, int level)
{
	mptsas_t		*mpt = ADDR2MPT(ap);
	int			rval;
	mptsas_tgt_private_t	*tgt_private;
	mptsas_target_t		*ptgt = NULL;

	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
	ptgt = tgt_private->t_private;
	if (ptgt == NULL) {
		return (FALSE);
	}
	NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
	    level));

	mutex_enter(&mpt->m_mutex);
	/*
	 * if we are not in panic set up a reset delay for this target
	 */
	if (!ddi_in_panic()) {
		mptsas_setup_bus_reset_delay(mpt);
	} else {
		/* In panic context, busy-wait out the reset delay instead. */
		drv_usecwait(mpt->m_scsi_reset_delay * 1000);
	}
	rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
	mutex_exit(&mpt->m_mutex);

	/*
	 * The transport layer expect to only see TRUE and
	 * FALSE. Therefore, we will adjust the return value
	 * if mptsas_do_scsi_reset returns FAILED.
	 */
	if (rval == FAILED)
		rval = FALSE;
	return (rval);
}
8938 8956
/*
 * Issue a target reset for devhdl via an IOC task-management request,
 * then drain the doneq.  Silently succeeds (TRUE) without resetting if the
 * devhdl belongs to a physical disk that is part of a RAID volume.
 * Caller holds m_mutex.
 */
static int
mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
{
	int		rval = FALSE;
	uint8_t		config, disk;

	ASSERT(mutex_owned(&mpt->m_mutex));

	if (mptsas_debug_resets) {
		mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
		    devhdl);
	}

	/*
	 * Issue a Target Reset message to the target specified but not to a
	 * disk making up a raid volume. Just look through the RAID config
	 * Phys Disk list of DevHandles. If the target's DevHandle is in this
	 * list, then don't reset this target.
	 */
	for (config = 0; config < mpt->m_num_raid_configs; config++) {
		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
			if (devhdl == mpt->m_raidconfig[config].
			    m_physdisk_devhdl[disk]) {
				return (TRUE);
			}
		}
	}

	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);

	/* Flush completions generated by the reset before returning. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
8973 8991
/*
 * tran_reset_notify(9E) entry point: register or cancel a callback to be
 * invoked on bus reset, using the common scsi_hba helper with this
 * instance's mutex and notify list.
 */
static int
mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	mptsas_t	*mpt = ADDR2MPT(ap);

	NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));

	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &mpt->m_mutex, &mpt->m_reset_notify_listf));
}
8985 9003
8986 9004 static int
8987 9005 mptsas_get_name(struct scsi_device *sd, char *name, int len)
8988 9006 {
8989 9007 dev_info_t *lun_dip = NULL;
8990 9008
8991 9009 ASSERT(sd != NULL);
8992 9010 ASSERT(name != NULL);
8993 9011 lun_dip = sd->sd_dev;
8994 9012 ASSERT(lun_dip != NULL);
8995 9013
8996 9014 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
8997 9015 return (1);
8998 9016 } else {
8999 9017 return (0);
9000 9018 }
9001 9019 }
9002 9020
/* Bus address is identical to the node name for this HBA; delegate. */
static int
mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	return (mptsas_get_name(sd, name, len));
}
9008 9026
9009 9027 void
9010 9028 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
9011 9029 {
9012 9030
9013 9031 NDBG25(("mptsas_set_throttle: throttle=%x", what));
9014 9032
9015 9033 /*
9016 9034 * if the bus is draining/quiesced, no changes to the throttles
9017 9035 * are allowed. Not allowing change of throttles during draining
9018 9036 * limits error recovery but will reduce draining time
9019 9037 *
9020 9038 * all throttles should have been set to HOLD_THROTTLE
9021 9039 */
9022 9040 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
9023 9041 return;
9024 9042 }
9025 9043
9026 9044 if (what == HOLD_THROTTLE) {
9027 9045 ptgt->m_t_throttle = HOLD_THROTTLE;
9028 9046 } else if (ptgt->m_reset_delay == 0) {
9029 9047 ptgt->m_t_throttle = what;
9030 9048 }
9031 9049 }
9032 9050
9033 9051 /*
9034 9052 * Clean up from a device reset.
9035 9053 * For the case of target reset, this function clears the waitq of all
9036 9054 * commands for a particular target. For the case of abort task set, this
 * function clears the waitq of all commands for a particular target/lun.
9038 9056 */
9039 9057 static void
9040 9058 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
9041 9059 {
9042 9060 mptsas_slots_t *slots = mpt->m_active;
9043 9061 mptsas_cmd_t *cmd, *next_cmd;
9044 9062 int slot;
9045 9063 uchar_t reason;
9046 9064 uint_t stat;
9047 9065 hrtime_t timestamp;
9048 9066
9049 9067 NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
9050 9068
9051 9069 timestamp = gethrtime();
9052 9070
9053 9071 /*
9054 9072 * Make sure the I/O Controller has flushed all cmds
9055 9073 * that are associated with this target for a target reset
9056 9074 * and target/lun for abort task set.
9057 9075 * Account for TM requests, which use the last SMID.
9058 9076 */
9059 9077 for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
9060 9078 if ((cmd = slots->m_slot[slot]) == NULL)
9061 9079 continue;
9062 9080 reason = CMD_RESET;
9063 9081 stat = STAT_DEV_RESET;
9064 9082 switch (tasktype) {
9065 9083 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
9066 9084 if (Tgt(cmd) == target) {
9067 9085 if (cmd->cmd_active_expiration <= timestamp) {
9068 9086 /*
9069 9087 * When timeout requested, propagate
9070 9088 * proper reason and statistics to
9071 9089 * target drivers.
9072 9090 */
9073 9091 reason = CMD_TIMEOUT;
9074 9092 stat |= STAT_TIMEOUT;
9075 9093 }
9076 9094 NDBG25(("mptsas_flush_target discovered non-"
9077 9095 "NULL cmd in slot %d, tasktype 0x%x", slot,
9078 9096 tasktype));
9079 9097 mptsas_dump_cmd(mpt, cmd);
9080 9098 mptsas_remove_cmd(mpt, cmd);
9081 9099 mptsas_set_pkt_reason(mpt, cmd, reason, stat);
9082 9100 mptsas_doneq_add(mpt, cmd);
9083 9101 }
9084 9102 break;
9085 9103 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9086 9104 reason = CMD_ABORTED;
9087 9105 stat = STAT_ABORTED;
9088 9106 /*FALLTHROUGH*/
9089 9107 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9090 9108 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9091 9109
9092 9110 NDBG25(("mptsas_flush_target discovered non-"
9093 9111 "NULL cmd in slot %d, tasktype 0x%x", slot,
9094 9112 tasktype));
9095 9113 mptsas_dump_cmd(mpt, cmd);
9096 9114 mptsas_remove_cmd(mpt, cmd);
9097 9115 mptsas_set_pkt_reason(mpt, cmd, reason,
9098 9116 stat);
9099 9117 mptsas_doneq_add(mpt, cmd);
9100 9118 }
9101 9119 break;
9102 9120 default:
9103 9121 break;
9104 9122 }
9105 9123 }
9106 9124
9107 9125 /*
9108 9126 * Flush the waitq and tx_waitq of this target's cmds
9109 9127 */
9110 9128 cmd = mpt->m_waitq;
9111 9129
9112 9130 reason = CMD_RESET;
9113 9131 stat = STAT_DEV_RESET;
9114 9132
9115 9133 switch (tasktype) {
9116 9134 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
9117 9135 while (cmd != NULL) {
9118 9136 next_cmd = cmd->cmd_linkp;
9119 9137 if (Tgt(cmd) == target) {
9120 9138 mptsas_waitq_delete(mpt, cmd);
9121 9139 mptsas_set_pkt_reason(mpt, cmd,
9122 9140 reason, stat);
9123 9141 mptsas_doneq_add(mpt, cmd);
9124 9142 }
9125 9143 cmd = next_cmd;
9126 9144 }
9127 9145 mutex_enter(&mpt->m_tx_waitq_mutex);
9128 9146 cmd = mpt->m_tx_waitq;
9129 9147 while (cmd != NULL) {
9130 9148 next_cmd = cmd->cmd_linkp;
9131 9149 if (Tgt(cmd) == target) {
9132 9150 mptsas_tx_waitq_delete(mpt, cmd);
9133 9151 mutex_exit(&mpt->m_tx_waitq_mutex);
9134 9152 mptsas_set_pkt_reason(mpt, cmd,
9135 9153 reason, stat);
9136 9154 mptsas_doneq_add(mpt, cmd);
9137 9155 mutex_enter(&mpt->m_tx_waitq_mutex);
9138 9156 }
9139 9157 cmd = next_cmd;
9140 9158 }
9141 9159 mutex_exit(&mpt->m_tx_waitq_mutex);
9142 9160 break;
9143 9161 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9144 9162 reason = CMD_ABORTED;
9145 9163 stat = STAT_ABORTED;
9146 9164 /*FALLTHROUGH*/
9147 9165 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9148 9166 while (cmd != NULL) {
9149 9167 next_cmd = cmd->cmd_linkp;
9150 9168 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9151 9169 mptsas_waitq_delete(mpt, cmd);
9152 9170 mptsas_set_pkt_reason(mpt, cmd,
9153 9171 reason, stat);
9154 9172 mptsas_doneq_add(mpt, cmd);
9155 9173 }
9156 9174 cmd = next_cmd;
9157 9175 }
9158 9176 mutex_enter(&mpt->m_tx_waitq_mutex);
9159 9177 cmd = mpt->m_tx_waitq;
9160 9178 while (cmd != NULL) {
9161 9179 next_cmd = cmd->cmd_linkp;
9162 9180 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9163 9181 mptsas_tx_waitq_delete(mpt, cmd);
9164 9182 mutex_exit(&mpt->m_tx_waitq_mutex);
9165 9183 mptsas_set_pkt_reason(mpt, cmd,
9166 9184 reason, stat);
9167 9185 mptsas_doneq_add(mpt, cmd);
9168 9186 mutex_enter(&mpt->m_tx_waitq_mutex);
9169 9187 }
9170 9188 cmd = next_cmd;
9171 9189 }
9172 9190 mutex_exit(&mpt->m_tx_waitq_mutex);
9173 9191 break;
9174 9192 default:
9175 9193 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
9176 9194 tasktype);
9177 9195 break;
9178 9196 }
9179 9197 }
9180 9198
9181 9199 /*
9182 9200 * Clean up hba state, abort all outstanding command and commands in waitq
9183 9201 * reset timeout of all targets.
9184 9202 */
9185 9203 static void
9186 9204 mptsas_flush_hba(mptsas_t *mpt)
9187 9205 {
9188 9206 mptsas_slots_t *slots = mpt->m_active;
9189 9207 mptsas_cmd_t *cmd;
9190 9208 int slot;
9191 9209
9192 9210 NDBG25(("mptsas_flush_hba"));
9193 9211
9194 9212 /*
9195 9213 * The I/O Controller should have already sent back
9196 9214 * all commands via the scsi I/O reply frame. Make
9197 9215 * sure all commands have been flushed.
9198 9216 * Account for TM request, which use the last SMID.
9199 9217 */
9200 9218 for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
9201 9219 if ((cmd = slots->m_slot[slot]) == NULL)
9202 9220 continue;
9203 9221
9204 9222 if (cmd->cmd_flags & CFLAG_CMDIOC) {
9205 9223 /*
9206 9224 * Need to make sure to tell everyone that might be
9207 9225 * waiting on this command that it's going to fail. If
9208 9226 * we get here, this command will never timeout because
9209 9227 * the active command table is going to be re-allocated,
9210 9228 * so there will be nothing to check against a time out.
9211 9229 * Instead, mark the command as failed due to reset.
9212 9230 */
9213 9231 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
9214 9232 STAT_BUS_RESET);
9215 9233 if ((cmd->cmd_flags &
9216 9234 (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
9217 9235 cmd->cmd_flags |= CFLAG_FINISHED;
9218 9236 cv_broadcast(&mpt->m_passthru_cv);
9219 9237 cv_broadcast(&mpt->m_config_cv);
9220 9238 cv_broadcast(&mpt->m_fw_diag_cv);
9221 9239 }
9222 9240 continue;
9223 9241 }
9224 9242
9225 9243 NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
9226 9244 slot));
9227 9245 mptsas_dump_cmd(mpt, cmd);
9228 9246
9229 9247 mptsas_remove_cmd(mpt, cmd);
9230 9248 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9231 9249 mptsas_doneq_add(mpt, cmd);
9232 9250 }
9233 9251
9234 9252 /*
9235 9253 * Flush the waitq.
9236 9254 */
9237 9255 while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
9238 9256 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9239 9257 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9240 9258 (cmd->cmd_flags & CFLAG_CONFIG) ||
9241 9259 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
9242 9260 cmd->cmd_flags |= CFLAG_FINISHED;
9243 9261 cv_broadcast(&mpt->m_passthru_cv);
9244 9262 cv_broadcast(&mpt->m_config_cv);
9245 9263 cv_broadcast(&mpt->m_fw_diag_cv);
9246 9264 } else {
9247 9265 mptsas_doneq_add(mpt, cmd);
9248 9266 }
9249 9267 }
9250 9268
9251 9269 /*
9252 9270 * Flush the tx_waitq
9253 9271 */
9254 9272 mutex_enter(&mpt->m_tx_waitq_mutex);
9255 9273 while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
9256 9274 mutex_exit(&mpt->m_tx_waitq_mutex);
9257 9275 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9258 9276 mptsas_doneq_add(mpt, cmd);
9259 9277 mutex_enter(&mpt->m_tx_waitq_mutex);
9260 9278 }
9261 9279 mutex_exit(&mpt->m_tx_waitq_mutex);
9262 9280
9263 9281 /*
9264 9282 * Drain the taskqs prior to reallocating resources.
9265 9283 */
9266 9284 mutex_exit(&mpt->m_mutex);
9267 9285 ddi_taskq_wait(mpt->m_event_taskq);
9268 9286 ddi_taskq_wait(mpt->m_dr_taskq);
9269 9287 mutex_enter(&mpt->m_mutex);
9270 9288 }
9271 9289
9272 9290 /*
9273 9291 * set pkt_reason and OR in pkt_statistics flag
9274 9292 */
9275 9293 static void
9276 9294 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9277 9295 uint_t stat)
9278 9296 {
9279 9297 #ifndef __lock_lint
9280 9298 _NOTE(ARGUNUSED(mpt))
9281 9299 #endif
9282 9300
9283 9301 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9284 9302 (void *)cmd, reason, stat));
9285 9303
9286 9304 if (cmd) {
9287 9305 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9288 9306 cmd->cmd_pkt->pkt_reason = reason;
9289 9307 }
9290 9308 cmd->cmd_pkt->pkt_statistics |= stat;
9291 9309 }
9292 9310 }
9293 9311
9294 9312 static void
9295 9313 mptsas_start_watch_reset_delay()
9296 9314 {
9297 9315 NDBG22(("mptsas_start_watch_reset_delay"));
9298 9316
9299 9317 mutex_enter(&mptsas_global_mutex);
9300 9318 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
9301 9319 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
9302 9320 drv_usectohz((clock_t)
9303 9321 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
9304 9322 ASSERT(mptsas_reset_watch != NULL);
9305 9323 }
9306 9324 mutex_exit(&mptsas_global_mutex);
9307 9325 }
9308 9326
9309 9327 static void
9310 9328 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
9311 9329 {
9312 9330 mptsas_target_t *ptgt = NULL;
9313 9331
9314 9332 ASSERT(MUTEX_HELD(&mpt->m_mutex));
9315 9333
9316 9334 NDBG22(("mptsas_setup_bus_reset_delay"));
9317 9335 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9318 9336 ptgt = refhash_next(mpt->m_targets, ptgt)) {
9319 9337 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9320 9338 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
9321 9339 }
9322 9340
9323 9341 mptsas_start_watch_reset_delay();
9324 9342 }
9325 9343
9326 9344 /*
9327 9345 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9328 9346 * mpt instance for active reset delays
9329 9347 */
9330 9348 static void
9331 9349 mptsas_watch_reset_delay(void *arg)
9332 9350 {
9333 9351 #ifndef __lock_lint
9334 9352 _NOTE(ARGUNUSED(arg))
9335 9353 #endif
9336 9354
9337 9355 mptsas_t *mpt;
9338 9356 int not_done = 0;
9339 9357
9340 9358 NDBG22(("mptsas_watch_reset_delay"));
9341 9359
9342 9360 mutex_enter(&mptsas_global_mutex);
9343 9361 mptsas_reset_watch = 0;
9344 9362 mutex_exit(&mptsas_global_mutex);
9345 9363 rw_enter(&mptsas_global_rwlock, RW_READER);
9346 9364 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9347 9365 if (mpt->m_tran == 0) {
9348 9366 continue;
9349 9367 }
9350 9368 mutex_enter(&mpt->m_mutex);
9351 9369 not_done += mptsas_watch_reset_delay_subr(mpt);
9352 9370 mutex_exit(&mpt->m_mutex);
9353 9371 }
9354 9372 rw_exit(&mptsas_global_rwlock);
9355 9373
9356 9374 if (not_done) {
9357 9375 mptsas_start_watch_reset_delay();
9358 9376 }
9359 9377 }
9360 9378
/*
 * Tick down the reset-delay counters of every target on one instance.
 * Targets whose delay expires get their throttle restored to MAX and the
 * HBA restarted.  Returns -1 if any target is still counting down, 0
 * otherwise, so the caller knows whether to re-arm the watchdog.
 */
static int
mptsas_watch_reset_delay_subr(mptsas_t *mpt)
{
	int		done = 0;
	int		restart = 0;
	mptsas_target_t	*ptgt = NULL;

	NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		if (ptgt->m_reset_delay != 0) {
			ptgt->m_reset_delay -=
			    MPTSAS_WATCH_RESET_DELAY_TICK;
			if (ptgt->m_reset_delay <= 0) {
				/* Delay expired: resume I/O to this target. */
				ptgt->m_reset_delay = 0;
				mptsas_set_throttle(mpt, ptgt,
				    MAX_THROTTLE);
				restart++;
			} else {
				done = -1;
			}
		}
	}

	if (restart > 0) {
		/* At least one target was released; kick the HBA. */
		mptsas_restart_hba(mpt);
	}
	return (done);
}
9393 9411
#ifdef MPTSAS_TEST
/*
 * Test hook: when the global tunable mptsas_rtest is set to a target
 * number, issue a SCSI reset to that target the next time it is seen
 * here.  mptsas_rtest is disarmed (set to -1) once the reset succeeds,
 * so each arming fires at most once.
 */
static void
mptsas_test_reset(mptsas_t *mpt, int target)
{
	/* Removed unused local "ptgt" (was declared but never referenced). */
	if (mptsas_rtest == target) {
		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
			mptsas_rtest = -1;
		}
		if (mptsas_rtest == -1) {
			NDBG22(("mptsas_test_reset success"));
		}
	}
}
#endif
9410 9428
9411 9429 /*
9412 9430 * abort handling:
9413 9431 *
9414 9432 * Notes:
9415 9433 * - if pkt is not NULL, abort just that command
9416 9434 * - if pkt is NULL, abort all outstanding commands for target
9417 9435 */
9418 9436 static int
9419 9437 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9420 9438 {
9421 9439 mptsas_t *mpt = ADDR2MPT(ap);
9422 9440 int rval;
9423 9441 mptsas_tgt_private_t *tgt_private;
9424 9442 int target, lun;
9425 9443
9426 9444 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9427 9445 tran_tgt_private;
9428 9446 ASSERT(tgt_private != NULL);
9429 9447 target = tgt_private->t_private->m_devhdl;
9430 9448 lun = tgt_private->t_lun;
9431 9449
9432 9450 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9433 9451
9434 9452 mutex_enter(&mpt->m_mutex);
9435 9453 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9436 9454 mutex_exit(&mpt->m_mutex);
9437 9455 return (rval);
9438 9456 }
9439 9457
/*
 * Worker for mptsas_scsi_abort().  Aborts a single packet (if pkt is
 * non-NULL) or the whole task set for target/lun (if pkt is NULL).
 * Returns TRUE on success, FALSE otherwise.  Caller holds m_mutex.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		if (sp->cmd_queued) {
			/*
			 * Still on the waitq; never reached the hardware, so
			 * it can be completed locally without firmware help.
			 */
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Deliver any completions queued above to the target drivers. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
9519 9537
9520 9538 /*
9521 9539 * capability handling:
9522 9540 * (*tran_getcap). Get the capability named, and return its value.
9523 9541 */
9524 9542 static int
9525 9543 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9526 9544 {
9527 9545 mptsas_t *mpt = ADDR2MPT(ap);
9528 9546 int ckey;
9529 9547 int rval = FALSE;
9530 9548
9531 9549 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9532 9550 ap->a_target, cap, tgtonly));
9533 9551
9534 9552 mutex_enter(&mpt->m_mutex);
9535 9553
9536 9554 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9537 9555 mutex_exit(&mpt->m_mutex);
9538 9556 return (UNDEFINED);
9539 9557 }
9540 9558
9541 9559 switch (ckey) {
9542 9560 case SCSI_CAP_DMA_MAX:
9543 9561 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9544 9562 break;
9545 9563 case SCSI_CAP_ARQ:
9546 9564 rval = TRUE;
9547 9565 break;
9548 9566 case SCSI_CAP_MSG_OUT:
9549 9567 case SCSI_CAP_PARITY:
9550 9568 case SCSI_CAP_UNTAGGED_QING:
9551 9569 rval = TRUE;
9552 9570 break;
9553 9571 case SCSI_CAP_TAGGED_QING:
9554 9572 rval = TRUE;
9555 9573 break;
9556 9574 case SCSI_CAP_RESET_NOTIFICATION:
9557 9575 rval = TRUE;
9558 9576 break;
9559 9577 case SCSI_CAP_LINKED_CMDS:
9560 9578 rval = FALSE;
9561 9579 break;
9562 9580 case SCSI_CAP_QFULL_RETRIES:
9563 9581 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9564 9582 tran_tgt_private))->t_private->m_qfull_retries;
9565 9583 break;
9566 9584 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9567 9585 rval = drv_hztousec(((mptsas_tgt_private_t *)
9568 9586 (ap->a_hba_tran->tran_tgt_private))->
9569 9587 t_private->m_qfull_retry_interval) / 1000;
9570 9588 break;
9571 9589 case SCSI_CAP_CDB_LEN:
9572 9590 rval = CDB_GROUP4;
9573 9591 break;
9574 9592 case SCSI_CAP_INTERCONNECT_TYPE:
9575 9593 rval = INTERCONNECT_SAS;
9576 9594 break;
9577 9595 case SCSI_CAP_TRAN_LAYER_RETRIES:
9578 9596 if (mpt->m_ioc_capabilities &
9579 9597 MPI2_IOCFACTS_CAPABILITY_TLR)
9580 9598 rval = TRUE;
9581 9599 else
9582 9600 rval = FALSE;
9583 9601 break;
9584 9602 default:
9585 9603 rval = UNDEFINED;
9586 9604 break;
9587 9605 }
9588 9606
9589 9607 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9590 9608
9591 9609 mutex_exit(&mpt->m_mutex);
9592 9610 return (rval);
9593 9611 }
9594 9612
9595 9613 /*
9596 9614 * (*tran_setcap). Set the capability named to the value given.
9597 9615 */
9598 9616 static int
9599 9617 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9600 9618 {
9601 9619 mptsas_t *mpt = ADDR2MPT(ap);
9602 9620 int ckey;
9603 9621 int rval = FALSE;
9604 9622
9605 9623 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9606 9624 ap->a_target, cap, value, tgtonly));
9607 9625
9608 9626 if (!tgtonly) {
9609 9627 return (rval);
9610 9628 }
9611 9629
9612 9630 mutex_enter(&mpt->m_mutex);
9613 9631
9614 9632 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9615 9633 mutex_exit(&mpt->m_mutex);
9616 9634 return (UNDEFINED);
9617 9635 }
9618 9636
9619 9637 switch (ckey) {
9620 9638 case SCSI_CAP_DMA_MAX:
9621 9639 case SCSI_CAP_MSG_OUT:
9622 9640 case SCSI_CAP_PARITY:
9623 9641 case SCSI_CAP_INITIATOR_ID:
9624 9642 case SCSI_CAP_LINKED_CMDS:
9625 9643 case SCSI_CAP_UNTAGGED_QING:
9626 9644 case SCSI_CAP_RESET_NOTIFICATION:
9627 9645 /*
9628 9646 * None of these are settable via
9629 9647 * the capability interface.
9630 9648 */
9631 9649 break;
9632 9650 case SCSI_CAP_ARQ:
9633 9651 /*
9634 9652 * We cannot turn off arq so return false if asked to
9635 9653 */
9636 9654 if (value) {
9637 9655 rval = TRUE;
9638 9656 } else {
9639 9657 rval = FALSE;
9640 9658 }
9641 9659 break;
9642 9660 case SCSI_CAP_TAGGED_QING:
9643 9661 mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9644 9662 (ap->a_hba_tran->tran_tgt_private))->t_private,
9645 9663 MAX_THROTTLE);
9646 9664 rval = TRUE;
9647 9665 break;
9648 9666 case SCSI_CAP_QFULL_RETRIES:
9649 9667 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9650 9668 t_private->m_qfull_retries = (uchar_t)value;
9651 9669 rval = TRUE;
9652 9670 break;
9653 9671 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9654 9672 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9655 9673 t_private->m_qfull_retry_interval =
9656 9674 drv_usectohz(value * 1000);
9657 9675 rval = TRUE;
9658 9676 break;
9659 9677 default:
9660 9678 rval = UNDEFINED;
9661 9679 break;
9662 9680 }
9663 9681 mutex_exit(&mpt->m_mutex);
9664 9682 return (rval);
9665 9683 }
9666 9684
9667 9685 /*
9668 9686 * Utility routine for mptsas_ifsetcap/ifgetcap
9669 9687 */
9670 9688 /*ARGSUSED*/
9671 9689 static int
9672 9690 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9673 9691 {
9674 9692 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9675 9693
9676 9694 if (!cap)
9677 9695 return (FALSE);
9678 9696
9679 9697 *cidxp = scsi_hba_lookup_capstr(cap);
9680 9698 return (TRUE);
9681 9699 }
9682 9700
/*
 * Allocate (or re-allocate) the active-slots table sized by
 * MPTSAS_SLOTS_SIZE(mpt).  Any existing table is freed after the new
 * one is obtained.  Returns 0 on success, -1 if the allocation fails
 * (possible with a KM_NOSLEEP flag).
 */
static int
mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
{
	mptsas_slots_t	*old_active = mpt->m_active;
	mptsas_slots_t	*new_active;
	size_t		size;

	/*
	 * if there are active commands, then we cannot
	 * change size of active slots array.
	 */
	ASSERT(mpt->m_ncmds == 0);

	size = MPTSAS_SLOTS_SIZE(mpt);
	new_active = kmem_zalloc(size, flag);
	if (new_active == NULL) {
		NDBG1(("new active alloc failed"));
		return (-1);
	}
	/*
	 * Since SMID 0 is reserved and the TM slot is reserved, the
	 * number of slots that can be used at any one time is
	 * m_max_requests - 2.
	 */
	new_active->m_n_normal = (mpt->m_max_requests - 2);
	new_active->m_size = size;
	/* Start slot allocation scanning at SMID 1 (0 is reserved). */
	new_active->m_rotor = 1;
	if (old_active)
		mptsas_free_active_slots(mpt);
	mpt->m_active = new_active;

	return (0);
}
9716 9734
9717 9735 static void
9718 9736 mptsas_free_active_slots(mptsas_t *mpt)
9719 9737 {
9720 9738 mptsas_slots_t *active = mpt->m_active;
9721 9739 size_t size;
9722 9740
9723 9741 if (active == NULL)
9724 9742 return;
9725 9743 size = active->m_size;
9726 9744 kmem_free(active, size);
9727 9745 mpt->m_active = NULL;
9728 9746 }
9729 9747
9730 9748 /*
9731 9749 * Error logging, printing, and debug print routines.
9732 9750 */
9733 9751 static char *mptsas_label = "mpt_sas";
9734 9752
9735 9753 /*PRINTFLIKE3*/
9736 9754 void
9737 9755 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
9738 9756 {
9739 9757 dev_info_t *dev;
9740 9758 va_list ap;
9741 9759
9742 9760 if (mpt) {
9743 9761 dev = mpt->m_dip;
9744 9762 } else {
9745 9763 dev = 0;
9746 9764 }
9747 9765
9748 9766 mutex_enter(&mptsas_log_mutex);
9749 9767
9750 9768 va_start(ap, fmt);
9751 9769 (void) vsprintf(mptsas_log_buf, fmt, ap);
9752 9770 va_end(ap);
9753 9771
9754 9772 if (level == CE_CONT) {
9755 9773 scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
9756 9774 } else {
9757 9775 scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
9758 9776 }
9759 9777
9760 9778 mutex_exit(&mptsas_log_mutex);
9761 9779 }
9762 9780
#ifdef MPTSAS_DEBUG
/*
 * Use a circular buffer to log messages to private memory.
 * Increment idx atomically to minimize risk to miss lines.
 * It's fast and does not hold up the proceedings too much.
 */
static const size_t mptsas_dbglog_linecnt = MPTSAS_DBGLOG_LINECNT;
static const size_t mptsas_dbglog_linelen = MPTSAS_DBGLOG_LINELEN;
static char mptsas_dbglog_bufs[MPTSAS_DBGLOG_LINECNT][MPTSAS_DBGLOG_LINELEN];
static uint32_t mptsas_dbglog_idx = 0;

/*
 * Format a line into the next circular-buffer slot.  The mask works
 * because MPTSAS_DBGLOG_LINECNT is presumably a power of two — confirm
 * against the header definition.
 */
/*PRINTFLIKE1*/
void
mptsas_debug_log(char *fmt, ...)
{
	va_list	ap;
	uint32_t idx;

	idx = atomic_inc_32_nv(&mptsas_dbglog_idx) &
	    (mptsas_dbglog_linecnt - 1);

	va_start(ap, fmt);
	(void) vsnprintf(mptsas_dbglog_bufs[idx],
	    mptsas_dbglog_linelen, fmt, ap);
	va_end(ap);
}

/*
 * Debug printf: formats into the shared log buffer (under
 * mptsas_log_mutex) and emits via prom_printf() or scsi_log().
 */
/*PRINTFLIKE1*/
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t	*dev = 0;
	va_list	ap;

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
#endif
9811 9829
9812 9830 /*
9813 9831 * timeout handling
9814 9832 */
9815 9833 static void
9816 9834 mptsas_watch(void *arg)
9817 9835 {
9818 9836 #ifndef __lock_lint
9819 9837 _NOTE(ARGUNUSED(arg))
9820 9838 #endif
9821 9839
9822 9840 mptsas_t *mpt;
9823 9841 uint32_t doorbell;
9824 9842
9825 9843 NDBG30(("mptsas_watch"));
9826 9844
9827 9845 rw_enter(&mptsas_global_rwlock, RW_READER);
9828 9846 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9829 9847
9830 9848 mutex_enter(&mpt->m_mutex);
9831 9849
9832 9850 /* Skip device if not powered on */
9833 9851 if (mpt->m_options & MPTSAS_OPT_PM) {
9834 9852 if (mpt->m_power_level == PM_LEVEL_D0) {
9835 9853 (void) pm_busy_component(mpt->m_dip, 0);
9836 9854 mpt->m_busy = 1;
9837 9855 } else {
9838 9856 mutex_exit(&mpt->m_mutex);
9839 9857 continue;
9840 9858 }
9841 9859 }
9842 9860
9843 9861 /*
9844 9862 * Check if controller is in a FAULT state. If so, reset it.
9845 9863 */
9846 9864 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9847 9865 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9848 9866 doorbell &= MPI2_DOORBELL_DATA_MASK;
9849 9867 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9850 9868 "code: %04x", doorbell);
9851 9869 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9852 9870 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9853 9871 mptsas_log(mpt, CE_WARN, "Reset failed"
9854 9872 "after fault was detected");
9855 9873 }
9856 9874 }
9857 9875
9858 9876 /*
9859 9877 * For now, always call mptsas_watchsubr.
9860 9878 */
9861 9879 mptsas_watchsubr(mpt);
9862 9880
9863 9881 if (mpt->m_options & MPTSAS_OPT_PM) {
9864 9882 mpt->m_busy = 0;
9865 9883 (void) pm_idle_component(mpt->m_dip, 0);
9866 9884 }
9867 9885
9868 9886 mutex_exit(&mpt->m_mutex);
9869 9887 }
9870 9888 rw_exit(&mptsas_global_rwlock);
9871 9889
9872 9890 mutex_enter(&mptsas_global_mutex);
9873 9891 if (mptsas_timeouts_enabled)
9874 9892 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9875 9893 mutex_exit(&mptsas_global_mutex);
9876 9894 }
9877 9895
/*
 * Per-instance watchdog work: detect stuck commands in the active
 * slots, fail timed-out passthrough/config/diag commands, restore
 * throttles after qfull draining, and trigger timeout recovery when
 * every command on a target has expired.  Caller holds m_mutex.
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;
	hrtime_t	timestamp = gethrtime();

	ASSERT(MUTEX_HELD(&mpt->m_mutex));

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if (cmd->cmd_active_expiration <= timestamp) {
				if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				} else if (cmd->cmd_flags &
				    (CFLAG_PASSTHRU | CFLAG_CONFIG |
				    CFLAG_FW_DIAG)) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					/* Wake the synchronous waiter. */
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		/* Latest-expiring command sits at the tail of the list. */
		cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
		if (cmd == NULL)
			continue;

		if (cmd->cmd_active_expiration <= timestamp) {
			/*
			 * Earliest command timeout expired. Drain throttle.
			 */
			mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);

			/*
			 * Check for remaining commands.
			 */
			cmd = TAILQ_FIRST(&ptgt->m_active_cmdq);
			if (cmd->cmd_active_expiration > timestamp) {
				/*
				 * Wait for remaining commands to complete or
				 * time out.
				 */
				NDBG23(("command timed out, pending drain"));
				continue;
			}

			/*
			 * All command timeouts expired.
			 */
			mptsas_log(mpt, CE_NOTE, "Timeout of %d seconds "
			    "expired with %d commands on target %d lun %d.",
			    cmd->cmd_pkt->pkt_time, ptgt->m_t_ncmds,
			    ptgt->m_devhdl, Lun(cmd));

			mptsas_cmd_timeout(mpt, ptgt);
		} else if (cmd->cmd_active_expiration <=
		    timestamp + (hrtime_t)mptsas_scsi_watchdog_tick * NANOSEC) {
			/* Will expire before the next tick; start draining. */
			NDBG23(("pending timeout"));
			mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
		}
	}
}
9979 9997
9980 9998 /*
9981 9999 * timeout recovery
9982 10000 */
9983 10001 static void
9984 10002 mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt)
9985 10003 {
9986 10004 uint16_t devhdl;
9987 10005 uint64_t sas_wwn;
9988 10006 uint8_t phy;
9989 10007 char wwn_str[MPTSAS_WWN_STRLEN];
9990 10008
9991 10009 devhdl = ptgt->m_devhdl;
9992 10010 sas_wwn = ptgt->m_addr.mta_wwn;
9993 10011 phy = ptgt->m_phynum;
9994 10012 if (sas_wwn == 0) {
9995 10013 (void) sprintf(wwn_str, "p%x", phy);
9996 10014 } else {
9997 10015 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
9998 10016 }
9999 10017
10000 10018 NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
10001 10019 mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
10002 10020 "target %d %s, enclosure %u", devhdl, wwn_str,
10003 10021 ptgt->m_enclosure);
10004 10022
10005 10023 /*
10006 10024 * Abort all outstanding commands on the device.
10007 10025 */
10008 10026 NDBG29(("mptsas_cmd_timeout: device reset"));
10009 10027 if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
10010 10028 mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
10011 10029 "recovery failed!", devhdl);
10012 10030 }
10013 10031 }
10014 10032
10015 10033 /*
10016 10034 * Device / Hotplug control
10017 10035 */
10018 10036 static int
10019 10037 mptsas_scsi_quiesce(dev_info_t *dip)
10020 10038 {
10021 10039 mptsas_t *mpt;
10022 10040 scsi_hba_tran_t *tran;
10023 10041
10024 10042 tran = ddi_get_driver_private(dip);
10025 10043 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10026 10044 return (-1);
10027 10045
10028 10046 return (mptsas_quiesce_bus(mpt));
10029 10047 }
10030 10048
10031 10049 static int
10032 10050 mptsas_scsi_unquiesce(dev_info_t *dip)
10033 10051 {
10034 10052 mptsas_t *mpt;
10035 10053 scsi_hba_tran_t *tran;
10036 10054
10037 10055 tran = ddi_get_driver_private(dip);
10038 10056 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10039 10057 return (-1);
10040 10058
10041 10059 return (mptsas_unquiesce_bus(mpt));
10042 10060 }
10043 10061
/*
 * Quiesce the bus: hold every target's throttle and, if commands are
 * still outstanding, wait (interruptibly) for them to drain, polled by
 * mptsas_ncmds_checkdrain().  Returns 0 once quiesced, -1 if the wait
 * was interrupted by a signal.
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Undo the hold and let queued I/O run again. */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			}
			mptsas_restart_hba(mpt);
			/* Cancel the drain-poll timeout if still armed. */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
10096 10114
10097 10115 static int
10098 10116 mptsas_unquiesce_bus(mptsas_t *mpt)
10099 10117 {
10100 10118 mptsas_target_t *ptgt = NULL;
10101 10119
10102 10120 NDBG28(("mptsas_unquiesce_bus"));
10103 10121 mutex_enter(&mpt->m_mutex);
10104 10122 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10105 10123 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10106 10124 ptgt = refhash_next(mpt->m_targets, ptgt)) {
10107 10125 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10108 10126 }
10109 10127 mptsas_restart_hba(mpt);
10110 10128 mutex_exit(&mpt->m_mutex);
10111 10129 return (0);
10112 10130 }
10113 10131
/*
 * Periodic timeout callback armed by mptsas_quiesce_bus().  Checks
 * whether the outstanding-command count has drained to zero and, if so,
 * wakes the quiescing thread; otherwise re-holds the throttles (a bus
 * reset may have reopened them) and re-arms itself.
 */
static void
mptsas_ncmds_checkdrain(void *arg)
{
	mptsas_t	*mpt = arg;
	mptsas_target_t	*ptgt = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Only act while a quiesce is actually in progress. */
	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
		mpt->m_quiesce_timeid = 0;
		if (mpt->m_ncmds == 0) {
			/* Command queue has been drained */
			cv_signal(&mpt->m_cv);
		} else {
			/*
			 * The throttle may have been reset because
			 * of a SCSI bus reset
			 */
			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
			}

			/* Not drained yet; check again after the timeout. */
			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
			    drv_usectohz(1000000)));
		}
	}
	mutex_exit(&mpt->m_mutex);
}
10143 10161
10144 10162 /*ARGSUSED*/
10145 10163 static void
10146 10164 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10147 10165 {
10148 10166 int i;
10149 10167 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10150 10168 char buf[128];
10151 10169
10152 10170 buf[0] = '\0';
10153 10171 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10154 10172 Tgt(cmd), Lun(cmd)));
10155 10173 (void) sprintf(&buf[0], "\tcdb=[");
10156 10174 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10157 10175 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10158 10176 }
10159 10177 (void) sprintf(&buf[strlen(buf)], " ]");
10160 10178 NDBG25(("?%s\n", buf));
10161 10179 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10162 10180 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10163 10181 cmd->cmd_pkt->pkt_state));
10164 10182 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10165 10183 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10166 10184 }
10167 10185
10168 10186 static void
10169 10187 mptsas_passthru_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10170 10188 pMpi2SGESimple64_t sgep)
10171 10189 {
10172 10190 uint32_t sge_flags;
10173 10191 uint32_t data_size, dataout_size;
10174 10192 ddi_dma_cookie_t data_cookie;
10175 10193 ddi_dma_cookie_t dataout_cookie;
10176 10194
10177 10195 data_size = pt->data_size;
10178 10196 dataout_size = pt->dataout_size;
10179 10197 data_cookie = pt->data_cookie;
10180 10198 dataout_cookie = pt->dataout_cookie;
10181 10199
10182 10200 if (dataout_size) {
10183 10201 sge_flags = dataout_size |
10184 10202 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10185 10203 MPI2_SGE_FLAGS_END_OF_BUFFER |
10186 10204 MPI2_SGE_FLAGS_HOST_TO_IOC |
10187 10205 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10188 10206 MPI2_SGE_FLAGS_SHIFT);
10189 10207 ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
10190 10208 ddi_put32(acc_hdl, &sgep->Address.Low,
10191 10209 (uint32_t)(dataout_cookie.dmac_laddress &
10192 10210 0xffffffffull));
10193 10211 ddi_put32(acc_hdl, &sgep->Address.High,
10194 10212 (uint32_t)(dataout_cookie.dmac_laddress
10195 10213 >> 32));
10196 10214 sgep++;
10197 10215 }
10198 10216 sge_flags = data_size;
10199 10217 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10200 10218 MPI2_SGE_FLAGS_LAST_ELEMENT |
10201 10219 MPI2_SGE_FLAGS_END_OF_BUFFER |
10202 10220 MPI2_SGE_FLAGS_END_OF_LIST |
10203 10221 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10204 10222 MPI2_SGE_FLAGS_SHIFT);
10205 10223 if (pt->direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10206 10224 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
10207 10225 MPI2_SGE_FLAGS_SHIFT);
10208 10226 } else {
10209 10227 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
10210 10228 MPI2_SGE_FLAGS_SHIFT);
10211 10229 }
10212 10230 ddi_put32(acc_hdl, &sgep->FlagsLength,
10213 10231 sge_flags);
10214 10232 ddi_put32(acc_hdl, &sgep->Address.Low,
10215 10233 (uint32_t)(data_cookie.dmac_laddress &
10216 10234 0xffffffffull));
10217 10235 ddi_put32(acc_hdl, &sgep->Address.High,
10218 10236 (uint32_t)(data_cookie.dmac_laddress >> 32));
10219 10237 }
10220 10238
10221 10239 static void
10222 10240 mptsas_passthru_ieee_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10223 10241 pMpi2IeeeSgeSimple64_t ieeesgep)
10224 10242 {
10225 10243 uint8_t sge_flags;
10226 10244 uint32_t data_size, dataout_size;
10227 10245 ddi_dma_cookie_t data_cookie;
10228 10246 ddi_dma_cookie_t dataout_cookie;
10229 10247
10230 10248 data_size = pt->data_size;
10231 10249 dataout_size = pt->dataout_size;
10232 10250 data_cookie = pt->data_cookie;
10233 10251 dataout_cookie = pt->dataout_cookie;
10234 10252
10235 10253 sge_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
10236 10254 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
10237 10255 if (dataout_size) {
10238 10256 ddi_put32(acc_hdl, &ieeesgep->Length, dataout_size);
10239 10257 ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10240 10258 (uint32_t)(dataout_cookie.dmac_laddress &
10241 10259 0xffffffffull));
10242 10260 ddi_put32(acc_hdl, &ieeesgep->Address.High,
10243 10261 (uint32_t)(dataout_cookie.dmac_laddress >> 32));
10244 10262 ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10245 10263 ieeesgep++;
10246 10264 }
10247 10265 sge_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
10248 10266 ddi_put32(acc_hdl, &ieeesgep->Length, data_size);
10249 10267 ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10250 10268 (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
10251 10269 ddi_put32(acc_hdl, &ieeesgep->Address.High,
10252 10270 (uint32_t)(data_cookie.dmac_laddress >> 32));
10253 10271 ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10254 10272 }
10255 10273
10256 10274 static void
10257 10275 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
10258 10276 {
10259 10277 caddr_t memp;
10260 10278 pMPI2RequestHeader_t request_hdrp;
10261 10279 struct scsi_pkt *pkt = cmd->cmd_pkt;
10262 10280 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
10263 10281 uint32_t request_size;
10264 10282 uint32_t i;
10265 10283 uint64_t request_desc = 0;
10266 10284 uint8_t desc_type;
10267 10285 uint16_t SMID;
10268 10286 uint8_t *request, function;
10269 10287 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
10270 10288 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
10271 10289
10272 10290 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10273 10291
10274 10292 request = pt->request;
10275 10293 request_size = pt->request_size;
10276 10294
10277 10295 SMID = cmd->cmd_slot;
10278 10296
10279 10297 /*
10280 10298 * Store the passthrough message in memory location
10281 10299 * corresponding to our slot number
10282 10300 */
10283 10301 memp = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
10284 10302 request_hdrp = (pMPI2RequestHeader_t)memp;
10285 10303 bzero(memp, mpt->m_req_frame_size);
10286 10304
10287 10305 for (i = 0; i < request_size; i++) {
10288 10306 bcopy(request + i, memp + i, 1);
10289 10307 }
10290 10308
10291 10309 NDBG15(("mptsas_start_passthru: Func 0x%x, MsgFlags 0x%x, "
10292 10310 "size=%d, in %d, out %d, SMID %d", request_hdrp->Function,
10293 10311 request_hdrp->MsgFlags, request_size,
10294 10312 pt->data_size, pt->dataout_size, SMID));
10295 10313
10296 10314 /*
10297 10315 * Add an SGE, even if the length is zero.
10298 10316 */
10299 10317 if (mpt->m_MPI25 && pt->simple == 0) {
10300 10318 mptsas_passthru_ieee_sge(acc_hdl, pt,
10301 10319 (pMpi2IeeeSgeSimple64_t)
10302 10320 ((uint8_t *)request_hdrp + pt->sgl_offset));
10303 10321 } else {
10304 10322 mptsas_passthru_sge(acc_hdl, pt,
10305 10323 (pMpi2SGESimple64_t)
10306 10324 ((uint8_t *)request_hdrp + pt->sgl_offset));
10307 10325 }
10308 10326
10309 10327 function = request_hdrp->Function;
10310 10328 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10311 10329 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10312 10330 pMpi2SCSIIORequest_t scsi_io_req;
10313 10331 caddr_t arsbuf;
10314 10332 uint8_t ars_size;
10315 10333 uint32_t ars_dmaaddrlow;
10316 10334
10317 10335 NDBG15(("mptsas_start_passthru: Is SCSI IO Req"));
10318 10336 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
10319 10337
10320 10338 if (cmd->cmd_extrqslen != 0) {
10321 10339 /*
10322 10340 * Mapping of the buffer was done in
10323 10341 * mptsas_do_passthru().
10324 10342 * Calculate the DMA address with the same offset.
10325 10343 */
10326 10344 arsbuf = cmd->cmd_arq_buf;
10327 10345 ars_size = cmd->cmd_extrqslen;
10328 10346 ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10329 10347 ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
10330 10348 0xffffffffu;
10331 10349 } else {
10332 10350 arsbuf = mpt->m_req_sense +
10333 10351 (mpt->m_req_sense_size * (SMID-1));
10334 10352 cmd->cmd_arq_buf = arsbuf;
10335 10353 ars_size = mpt->m_req_sense_size;
10336 10354 ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10337 10355 (mpt->m_req_sense_size * (SMID-1))) &
10338 10356 0xffffffffu;
10339 10357 }
10340 10358 bzero(arsbuf, ars_size);
10341 10359
10342 10360 ddi_put8(acc_hdl, &scsi_io_req->SenseBufferLength, ars_size);
10343 10361 ddi_put32(acc_hdl, &scsi_io_req->SenseBufferLowAddress,
10344 10362 ars_dmaaddrlow);
10345 10363
10346 10364 /*
10347 10365 * Put SGE for data and data_out buffer at the end of
10348 10366 * scsi_io_request message header.(64 bytes in total)
10349 10367 * Set SGLOffset0 value
10350 10368 */
10351 10369 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
10352 10370 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
10353 10371
10354 10372 /*
10355 10373 * Setup descriptor info. RAID passthrough must use the
10356 10374 * default request descriptor which is already set, so if this
10357 10375 * is a SCSI IO request, change the descriptor to SCSI IO.
10358 10376 */
10359 10377 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
10360 10378 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
10361 10379 request_desc = ((uint64_t)ddi_get16(acc_hdl,
10362 10380 &scsi_io_req->DevHandle) << 48);
10363 10381 }
10364 10382 (void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
10365 10383 DDI_DMA_SYNC_FORDEV);
10366 10384 }
10367 10385
10368 10386 /*
10369 10387 * We must wait till the message has been completed before
10370 10388 * beginning the next message so we wait for this one to
10371 10389 * finish.
10372 10390 */
10373 10391 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
10374 10392 request_desc |= (SMID << 16) + desc_type;
10375 10393 cmd->cmd_rfm = NULL;
10376 10394 MPTSAS_START_CMD(mpt, request_desc);
10377 10395 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
10378 10396 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
10379 10397 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10380 10398 }
10381 10399 }
10382 10400
10383 10401 typedef void (mptsas_pre_f)(mptsas_t *, mptsas_pt_request_t *);
10384 10402 static mptsas_pre_f mpi_pre_ioc_facts;
10385 10403 static mptsas_pre_f mpi_pre_port_facts;
10386 10404 static mptsas_pre_f mpi_pre_fw_download;
10387 10405 static mptsas_pre_f mpi_pre_fw_25_download;
10388 10406 static mptsas_pre_f mpi_pre_fw_upload;
10389 10407 static mptsas_pre_f mpi_pre_fw_25_upload;
10390 10408 static mptsas_pre_f mpi_pre_sata_passthrough;
10391 10409 static mptsas_pre_f mpi_pre_smp_passthrough;
10392 10410 static mptsas_pre_f mpi_pre_config;
10393 10411 static mptsas_pre_f mpi_pre_sas_io_unit_control;
10394 10412 static mptsas_pre_f mpi_pre_scsi_io_req;
10395 10413
10396 10414 /*
10397 10415 * Prepare the pt for a SAS2 FW_DOWNLOAD request.
10398 10416 */
10399 10417 static void
10400 10418 mpi_pre_fw_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10401 10419 {
10402 10420 pMpi2FWDownloadTCSGE_t tcsge;
10403 10421 pMpi2FWDownloadRequest req;
10404 10422
10405 10423 /*
10406 10424 * If SAS3, call separate function.
10407 10425 */
10408 10426 if (mpt->m_MPI25) {
10409 10427 mpi_pre_fw_25_download(mpt, pt);
10410 10428 return;
10411 10429 }
10412 10430
10413 10431 /*
10414 10432 * User requests should come in with the Transaction
10415 10433 * context element where the SGL will go. Putting the
10416 10434 * SGL after that seems to work, but don't really know
10417 10435 * why. Other drivers tend to create an extra SGL and
10418 10436 * refer to the TCE through that.
10419 10437 */
10420 10438 req = (pMpi2FWDownloadRequest)pt->request;
10421 10439 tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
10422 10440 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10423 10441 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10424 10442 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10425 10443 }
10426 10444
10427 10445 pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
10428 10446 sizeof (*tcsge);
10429 10447 if (pt->request_size != pt->sgl_offset)
10430 10448 NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
10431 10449 "0x%x, should be 0x%x, dataoutsz 0x%x",
10432 10450 (int)pt->request_size, (int)pt->sgl_offset,
10433 10451 (int)pt->dataout_size));
10434 10452 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
10435 10453 NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
10436 10454 "0x%x, should be 0x%x", pt->data_size,
10437 10455 (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10438 10456 }
10439 10457
10440 10458 /*
10441 10459 * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10442 10460 */
10443 10461 static void
10444 10462 mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10445 10463 {
10446 10464 pMpi2FWDownloadTCSGE_t tcsge;
10447 10465 pMpi2FWDownloadRequest req2;
10448 10466 pMpi25FWDownloadRequest req25;
10449 10467
10450 10468 /*
10451 10469 * User requests should come in with the Transaction
10452 10470 * context element where the SGL will go. The new firmware
10453 10471 * Doesn't use TCE and has space in the main request for
10454 10472 * this information. So move to the right place.
10455 10473 */
10456 10474 req2 = (pMpi2FWDownloadRequest)pt->request;
10457 10475 req25 = (pMpi25FWDownloadRequest)pt->request;
10458 10476 tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10459 10477 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10460 10478 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10461 10479 mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10462 10480 }
10463 10481 req25->ImageOffset = tcsge->ImageOffset;
10464 10482 req25->ImageSize = tcsge->ImageSize;
10465 10483
10466 10484 pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10467 10485 if (pt->request_size != pt->sgl_offset)
10468 10486 NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10469 10487 "0x%x, should be 0x%x, dataoutsz 0x%x",
10470 10488 pt->request_size, pt->sgl_offset,
10471 10489 pt->dataout_size));
10472 10490 if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY))
10473 10491 NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10474 10492 "0x%x, should be 0x%x", pt->data_size,
10475 10493 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10476 10494 }
10477 10495
10478 10496 /*
10479 10497 * Prepare the pt for a SAS2 FW_UPLOAD request.
10480 10498 */
10481 10499 static void
10482 10500 mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10483 10501 {
10484 10502 pMpi2FWUploadTCSGE_t tcsge;
10485 10503 pMpi2FWUploadRequest_t req;
10486 10504
10487 10505 /*
10488 10506 * If SAS3, call separate function.
10489 10507 */
10490 10508 if (mpt->m_MPI25) {
10491 10509 mpi_pre_fw_25_upload(mpt, pt);
10492 10510 return;
10493 10511 }
10494 10512
10495 10513 /*
10496 10514 * User requests should come in with the Transaction
10497 10515 * context element where the SGL will go. Putting the
10498 10516 * SGL after that seems to work, but don't really know
10499 10517 * why. Other drivers tend to create an extra SGL and
10500 10518 * refer to the TCE through that.
10501 10519 */
10502 10520 req = (pMpi2FWUploadRequest_t)pt->request;
10503 10521 tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
10504 10522 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10505 10523 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10506 10524 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10507 10525 }
10508 10526
10509 10527 pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
10510 10528 sizeof (*tcsge);
10511 10529 if (pt->request_size != pt->sgl_offset)
10512 10530 NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
10513 10531 "0x%x, should be 0x%x, dataoutsz 0x%x",
10514 10532 pt->request_size, pt->sgl_offset,
10515 10533 pt->dataout_size));
10516 10534 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
10517 10535 NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
10518 10536 "0x%x, should be 0x%x", pt->data_size,
10519 10537 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10520 10538 }
10521 10539
10522 10540 /*
10523 10541 * Prepare the pt a SAS3 FW_UPLOAD request.
10524 10542 */
10525 10543 static void
10526 10544 mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10527 10545 {
10528 10546 pMpi2FWUploadTCSGE_t tcsge;
10529 10547 pMpi2FWUploadRequest_t req2;
10530 10548 pMpi25FWUploadRequest_t req25;
10531 10549
10532 10550 /*
10533 10551 * User requests should come in with the Transaction
10534 10552 * context element where the SGL will go. The new firmware
10535 10553 * Doesn't use TCE and has space in the main request for
10536 10554 * this information. So move to the right place.
10537 10555 */
10538 10556 req2 = (pMpi2FWUploadRequest_t)pt->request;
10539 10557 req25 = (pMpi25FWUploadRequest_t)pt->request;
10540 10558 tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
10541 10559 if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10542 10560 tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10543 10561 mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10544 10562 }
10545 10563 req25->ImageOffset = tcsge->ImageOffset;
10546 10564 req25->ImageSize = tcsge->ImageSize;
10547 10565
10548 10566 pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
10549 10567 if (pt->request_size != pt->sgl_offset)
10550 10568 NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
10551 10569 "0x%x, should be 0x%x, dataoutsz 0x%x",
10552 10570 pt->request_size, pt->sgl_offset,
10553 10571 pt->dataout_size));
10554 10572 if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY))
10555 10573 NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
10556 10574 "0x%x, should be 0x%x", pt->data_size,
10557 10575 (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10558 10576 }
10559 10577
10560 10578 /*
10561 10579 * Prepare the pt for an IOC_FACTS request.
10562 10580 */
10563 10581 static void
10564 10582 mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10565 10583 {
10566 10584 #ifndef __lock_lint
10567 10585 _NOTE(ARGUNUSED(mpt))
10568 10586 #endif
10569 10587 if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST))
10570 10588 NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
10571 10589 "0x%x, should be 0x%x, dataoutsz 0x%x",
10572 10590 pt->request_size,
10573 10591 (int)sizeof (MPI2_IOC_FACTS_REQUEST),
10574 10592 pt->dataout_size));
10575 10593 if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY))
10576 10594 NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
10577 10595 "0x%x, should be 0x%x", pt->data_size,
10578 10596 (int)sizeof (MPI2_IOC_FACTS_REPLY)));
10579 10597 pt->sgl_offset = (uint16_t)pt->request_size;
10580 10598 }
10581 10599
10582 10600 /*
10583 10601 * Prepare the pt for a PORT_FACTS request.
10584 10602 */
10585 10603 static void
10586 10604 mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10587 10605 {
10588 10606 #ifndef __lock_lint
10589 10607 _NOTE(ARGUNUSED(mpt))
10590 10608 #endif
10591 10609 if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST))
10592 10610 NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
10593 10611 "0x%x, should be 0x%x, dataoutsz 0x%x",
10594 10612 pt->request_size,
10595 10613 (int)sizeof (MPI2_PORT_FACTS_REQUEST),
10596 10614 pt->dataout_size));
10597 10615 if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY))
10598 10616 NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
10599 10617 "0x%x, should be 0x%x", pt->data_size,
10600 10618 (int)sizeof (MPI2_PORT_FACTS_REPLY)));
10601 10619 pt->sgl_offset = (uint16_t)pt->request_size;
10602 10620 }
10603 10621
10604 10622 /*
10605 10623 * Prepare pt for a SATA_PASSTHROUGH request.
10606 10624 */
10607 10625 static void
10608 10626 mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10609 10627 {
10610 10628 #ifndef __lock_lint
10611 10629 _NOTE(ARGUNUSED(mpt))
10612 10630 #endif
10613 10631 pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
10614 10632 if (pt->request_size != pt->sgl_offset)
10615 10633 NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
10616 10634 "0x%x, should be 0x%x, dataoutsz 0x%x",
10617 10635 pt->request_size, pt->sgl_offset,
10618 10636 pt->dataout_size));
10619 10637 if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY))
10620 10638 NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
10621 10639 "0x%x, should be 0x%x", pt->data_size,
10622 10640 (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
10623 10641 }
10624 10642
10625 10643 static void
10626 10644 mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
10627 10645 {
10628 10646 #ifndef __lock_lint
10629 10647 _NOTE(ARGUNUSED(mpt))
10630 10648 #endif
10631 10649 pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
10632 10650 if (pt->request_size != pt->sgl_offset)
10633 10651 NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
10634 10652 "0x%x, should be 0x%x, dataoutsz 0x%x",
10635 10653 pt->request_size, pt->sgl_offset,
10636 10654 pt->dataout_size));
10637 10655 if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY))
10638 10656 NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
10639 10657 "0x%x, should be 0x%x", pt->data_size,
10640 10658 (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
10641 10659 }
10642 10660
10643 10661 /*
10644 10662 * Prepare pt for a CONFIG request.
10645 10663 */
10646 10664 static void
10647 10665 mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
10648 10666 {
10649 10667 #ifndef __lock_lint
10650 10668 _NOTE(ARGUNUSED(mpt))
10651 10669 #endif
10652 10670 pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
10653 10671 if (pt->request_size != pt->sgl_offset)
10654 10672 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10655 10673 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10656 10674 pt->sgl_offset, pt->dataout_size));
10657 10675 if (pt->data_size != sizeof (MPI2_CONFIG_REPLY))
10658 10676 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10659 10677 "should be 0x%x", pt->data_size,
10660 10678 (int)sizeof (MPI2_CONFIG_REPLY)));
10661 10679 pt->simple = 1;
10662 10680 }
10663 10681
10664 10682 /*
10665 10683 * Prepare pt for a SCSI_IO_REQ request.
10666 10684 */
10667 10685 static void
10668 10686 mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
10669 10687 {
10670 10688 #ifndef __lock_lint
10671 10689 _NOTE(ARGUNUSED(mpt))
10672 10690 #endif
10673 10691 pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
10674 10692 if (pt->request_size != pt->sgl_offset)
10675 10693 NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
10676 10694 "should be 0x%x, dataoutsz 0x%x", pt->request_size,
10677 10695 pt->sgl_offset,
10678 10696 pt->dataout_size));
10679 10697 if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY))
10680 10698 NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
10681 10699 "should be 0x%x", pt->data_size,
10682 10700 (int)sizeof (MPI2_SCSI_IO_REPLY)));
10683 10701 }
10684 10702
10685 10703 /*
10686 10704 * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
10687 10705 */
static void
mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(mpt))
#endif
	/*
	 * SAS_IO_UNIT_CONTROL carries no SGL, so the (unused) SGL offset
	 * is simply the end of the request.
	 */
	pt->sgl_offset = (uint16_t)pt->request_size;
}
10696 10714
10697 10715 /*
10698 10716 * A set of functions to prepare an mptsas_cmd for the various
10699 10717 * supported requests.
10700 10718 */
static struct mptsas_func {
	U8		Function;	/* MPI2 function code */
	char		*Name;		/* human-readable name for debug */
	mptsas_pre_f	*f_pre;		/* per-function prep routine */
} mptsas_func_list[] = {
	{ MPI2_FUNCTION_IOC_FACTS, "IOC_FACTS", mpi_pre_ioc_facts },
	{ MPI2_FUNCTION_PORT_FACTS, "PORT_FACTS", mpi_pre_port_facts },
	{ MPI2_FUNCTION_FW_DOWNLOAD, "FW_DOWNLOAD", mpi_pre_fw_download },
	{ MPI2_FUNCTION_FW_UPLOAD, "FW_UPLOAD", mpi_pre_fw_upload },
	{ MPI2_FUNCTION_SATA_PASSTHROUGH, "SATA_PASSTHROUGH",
	    mpi_pre_sata_passthrough },
	{ MPI2_FUNCTION_SMP_PASSTHROUGH, "SMP_PASSTHROUGH",
	    mpi_pre_smp_passthrough},
	{ MPI2_FUNCTION_SCSI_IO_REQUEST, "SCSI_IO_REQUEST",
	    mpi_pre_scsi_io_req},
	{ MPI2_FUNCTION_CONFIG, "CONFIG", mpi_pre_config},
	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, "SAS_IO_UNIT_CONTROL",
	    mpi_pre_sas_io_unit_control },
	{ 0xFF, NULL, NULL } /* list end */
};
10721 10739
10722 10740 static void
10723 10741 mptsas_prep_sgl_offset(mptsas_t *mpt, mptsas_pt_request_t *pt)
10724 10742 {
10725 10743 pMPI2RequestHeader_t hdr;
10726 10744 struct mptsas_func *f;
10727 10745
10728 10746 hdr = (pMPI2RequestHeader_t)pt->request;
10729 10747
10730 10748 for (f = mptsas_func_list; f->f_pre != NULL; f++) {
10731 10749 if (hdr->Function == f->Function) {
10732 10750 f->f_pre(mpt, pt);
10733 10751 NDBG15(("mptsas_prep_sgl_offset: Function %s,"
10734 10752 " sgl_offset 0x%x", f->Name,
10735 10753 pt->sgl_offset));
10736 10754 return;
10737 10755 }
10738 10756 }
10739 10757 NDBG15(("mptsas_prep_sgl_offset: Unknown Function 0x%02x,"
10740 10758 " returning req_size 0x%x for sgl_offset",
10741 10759 hdr->Function, pt->request_size));
10742 10760 pt->sgl_offset = (uint16_t)pt->request_size;
10743 10761 }
10744 10762
10745 10763
10746 10764 static int
10747 10765 mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
10748 10766 uint8_t *data, uint32_t request_size, uint32_t reply_size,
10749 10767 uint32_t data_size, uint32_t direction, uint8_t *dataout,
10750 10768 uint32_t dataout_size, short timeout, int mode)
10751 10769 {
10752 10770 mptsas_pt_request_t pt;
10753 10771 mptsas_dma_alloc_state_t data_dma_state;
10754 10772 mptsas_dma_alloc_state_t dataout_dma_state;
10755 10773 caddr_t memp;
10756 10774 mptsas_cmd_t *cmd = NULL;
10757 10775 struct scsi_pkt *pkt;
10758 10776 uint32_t reply_len = 0, sense_len = 0;
10759 10777 pMPI2RequestHeader_t request_hdrp;
10760 10778 pMPI2RequestHeader_t request_msg;
10761 10779 pMPI2DefaultReply_t reply_msg;
10762 10780 Mpi2SCSIIOReply_t rep_msg;
10763 10781 int rvalue;
10764 10782 int i, status = 0, pt_flags = 0, rv = 0;
10765 10783 uint8_t function;
10766 10784
10767 10785 ASSERT(mutex_owned(&mpt->m_mutex));
10768 10786
10769 10787 reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
10770 10788 bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
10771 10789 request_msg = kmem_zalloc(request_size, KM_SLEEP);
10772 10790
10773 10791 mutex_exit(&mpt->m_mutex);
10774 10792 /*
10775 10793 * copy in the request buffer since it could be used by
10776 10794 * another thread when the pt request into waitq
10777 10795 */
10778 10796 if (ddi_copyin(request, request_msg, request_size, mode)) {
10779 10797 mutex_enter(&mpt->m_mutex);
10780 10798 status = EFAULT;
10781 10799 mptsas_log(mpt, CE_WARN, "failed to copy request data");
10782 10800 goto out;
10783 10801 }
10784 10802 NDBG27(("mptsas_do_passthru: mode 0x%x, size 0x%x, Func 0x%x",
10785 10803 mode, request_size, request_msg->Function));
10786 10804 mutex_enter(&mpt->m_mutex);
10787 10805
10788 10806 function = request_msg->Function;
10789 10807 if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
10790 10808 pMpi2SCSITaskManagementRequest_t task;
10791 10809 task = (pMpi2SCSITaskManagementRequest_t)request_msg;
10792 10810 mptsas_setup_bus_reset_delay(mpt);
10793 10811 rv = mptsas_ioc_task_management(mpt, task->TaskType,
10794 10812 task->DevHandle, (int)task->LUN[1], reply, reply_size,
10795 10813 mode);
10796 10814
10797 10815 if (rv != TRUE) {
10798 10816 status = EIO;
10799 10817 mptsas_log(mpt, CE_WARN, "task management failed");
10800 10818 }
10801 10819 goto out;
10802 10820 }
10803 10821
10804 10822 if (data_size != 0) {
10805 10823 data_dma_state.size = data_size;
10806 10824 if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
10807 10825 status = ENOMEM;
10808 10826 mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
10809 10827 "resource");
10810 10828 goto out;
10811 10829 }
10812 10830 pt_flags |= MPTSAS_DATA_ALLOCATED;
10813 10831 if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10814 10832 mutex_exit(&mpt->m_mutex);
10815 10833 for (i = 0; i < data_size; i++) {
10816 10834 if (ddi_copyin(data + i, (uint8_t *)
10817 10835 data_dma_state.memp + i, 1, mode)) {
10818 10836 mutex_enter(&mpt->m_mutex);
10819 10837 status = EFAULT;
10820 10838 mptsas_log(mpt, CE_WARN, "failed to "
10821 10839 "copy read data");
10822 10840 goto out;
10823 10841 }
10824 10842 }
10825 10843 mutex_enter(&mpt->m_mutex);
10826 10844 }
10827 10845 } else {
10828 10846 bzero(&data_dma_state, sizeof (data_dma_state));
10829 10847 }
10830 10848
10831 10849 if (dataout_size != 0) {
10832 10850 dataout_dma_state.size = dataout_size;
10833 10851 if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
10834 10852 status = ENOMEM;
10835 10853 mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
10836 10854 "resource");
10837 10855 goto out;
10838 10856 }
10839 10857 pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
10840 10858 mutex_exit(&mpt->m_mutex);
10841 10859 for (i = 0; i < dataout_size; i++) {
10842 10860 if (ddi_copyin(dataout + i, (uint8_t *)
10843 10861 dataout_dma_state.memp + i, 1, mode)) {
10844 10862 mutex_enter(&mpt->m_mutex);
10845 10863 mptsas_log(mpt, CE_WARN, "failed to copy out"
10846 10864 " data");
10847 10865 status = EFAULT;
10848 10866 goto out;
10849 10867 }
10850 10868 }
10851 10869 mutex_enter(&mpt->m_mutex);
10852 10870 } else {
10853 10871 bzero(&dataout_dma_state, sizeof (dataout_dma_state));
10854 10872 }
10855 10873
10856 10874 if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
10857 10875 status = EAGAIN;
10858 10876 mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
10859 10877 goto out;
10860 10878 }
10861 10879 pt_flags |= MPTSAS_REQUEST_POOL_CMD;
10862 10880
10863 10881 bzero((caddr_t)cmd, sizeof (*cmd));
10864 10882 bzero((caddr_t)pkt, scsi_pkt_size());
10865 10883 bzero((caddr_t)&pt, sizeof (pt));
10866 10884
10867 10885 cmd->ioc_cmd_slot = (uint32_t)(rvalue);
10868 10886
10869 10887 pt.request = (uint8_t *)request_msg;
10870 10888 pt.direction = direction;
10871 10889 pt.simple = 0;
10872 10890 pt.request_size = request_size;
10873 10891 pt.data_size = data_size;
10874 10892 pt.dataout_size = dataout_size;
10875 10893 pt.data_cookie = data_dma_state.cookie;
10876 10894 pt.dataout_cookie = dataout_dma_state.cookie;
10877 10895 mptsas_prep_sgl_offset(mpt, &pt);
10878 10896
10879 10897 /*
10880 10898 * Form a blank cmd/pkt to store the acknowledgement message
10881 10899 */
10882 10900 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
10883 10901 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
10884 10902 pkt->pkt_ha_private = (opaque_t)&pt;
10885 10903 pkt->pkt_flags = FLAG_HEAD;
10886 10904 pkt->pkt_time = timeout;
10887 10905 cmd->cmd_pkt = pkt;
10888 10906 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;
10889 10907
10890 10908 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10891 10909 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10892 10910 uint8_t com, cdb_group_id;
10893 10911 boolean_t ret;
10894 10912
10895 10913 pkt->pkt_cdbp = ((pMpi2SCSIIORequest_t)request_msg)->CDB.CDB32;
10896 10914 com = pkt->pkt_cdbp[0];
10897 10915 cdb_group_id = CDB_GROUPID(com);
10898 10916 switch (cdb_group_id) {
10899 10917 case CDB_GROUPID_0: cmd->cmd_cdblen = CDB_GROUP0; break;
10900 10918 case CDB_GROUPID_1: cmd->cmd_cdblen = CDB_GROUP1; break;
10901 10919 case CDB_GROUPID_2: cmd->cmd_cdblen = CDB_GROUP2; break;
10902 10920 case CDB_GROUPID_4: cmd->cmd_cdblen = CDB_GROUP4; break;
10903 10921 case CDB_GROUPID_5: cmd->cmd_cdblen = CDB_GROUP5; break;
10904 10922 default:
10905 10923 NDBG27(("mptsas_do_passthru: SCSI_IO, reserved "
10906 10924 "CDBGROUP 0x%x requested!", cdb_group_id));
10907 10925 break;
10908 10926 }
10909 10927
10910 10928 reply_len = sizeof (MPI2_SCSI_IO_REPLY);
10911 10929 sense_len = reply_size - reply_len;
10912 10930 ret = mptsas_cmdarqsize(mpt, cmd, sense_len, KM_SLEEP);
10913 10931 VERIFY(ret == B_TRUE);
10914 10932 } else {
10915 10933 reply_len = reply_size;
10916 10934 sense_len = 0;
10917 10935 }
10918 10936
10919 10937 NDBG27(("mptsas_do_passthru: %s, dsz 0x%x, dosz 0x%x, replen 0x%x, "
10920 10938 "snslen 0x%x",
10921 10939 (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE)?"Write":"Read",
10922 10940 data_size, dataout_size, reply_len, sense_len));
10923 10941
10924 10942 /*
10925 10943 * Save the command in a slot
10926 10944 */
10927 10945 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
10928 10946 /*
10929 10947 * Once passthru command get slot, set cmd_flags
10930 10948 * CFLAG_PREPARED.
10931 10949 */
10932 10950 cmd->cmd_flags |= CFLAG_PREPARED;
10933 10951 mptsas_start_passthru(mpt, cmd);
10934 10952 } else {
10935 10953 mptsas_waitq_add(mpt, cmd);
10936 10954 }
10937 10955
10938 10956 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
10939 10957 cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
10940 10958 }
10941 10959
10942 10960 NDBG27(("mptsas_do_passthru: Cmd complete, flags 0x%x, rfm 0x%x "
10943 10961 "pktreason 0x%x", cmd->cmd_flags, cmd->cmd_rfm,
10944 10962 pkt->pkt_reason));
10945 10963
10946 10964 if (cmd->cmd_flags & CFLAG_PREPARED) {
10947 10965 memp = mpt->m_req_frame + (mpt->m_req_frame_size *
10948 10966 cmd->cmd_slot);
10949 10967 request_hdrp = (pMPI2RequestHeader_t)memp;
10950 10968 }
10951 10969
10952 10970 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
10953 10971 status = ETIMEDOUT;
10954 10972 mptsas_log(mpt, CE_WARN, "passthrough command timeout");
10955 10973 pt_flags |= MPTSAS_CMD_TIMEOUT;
10956 10974 goto out;
10957 10975 }
10958 10976
10959 10977 if (cmd->cmd_rfm) {
10960 10978 /*
10961 10979 * cmd_rfm is zero means the command reply is a CONTEXT
10962 10980 * reply and no PCI Write to post the free reply SMFA
10963 10981 * because no reply message frame is used.
10964 10982 * cmd_rfm is non-zero means the reply is a ADDRESS
10965 10983 * reply and reply message frame is used.
10966 10984 */
10967 10985 pt_flags |= MPTSAS_ADDRESS_REPLY;
10968 10986 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
10969 10987 DDI_DMA_SYNC_FORCPU);
10970 10988 reply_msg = (pMPI2DefaultReply_t)
10971 10989 (mpt->m_reply_frame + (cmd->cmd_rfm -
10972 10990 (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
10973 10991 }
10974 10992
10975 10993 mptsas_fma_check(mpt, cmd);
10976 10994 if (pkt->pkt_reason == CMD_TRAN_ERR) {
10977 10995 status = EAGAIN;
10978 10996 mptsas_log(mpt, CE_WARN, "passthru fma error");
10979 10997 goto out;
10980 10998 }
10981 10999 if (pkt->pkt_reason == CMD_RESET) {
10982 11000 status = EAGAIN;
10983 11001 mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
10984 11002 goto out;
10985 11003 }
10986 11004
10987 11005 if (pkt->pkt_reason == CMD_INCOMPLETE) {
10988 11006 status = EIO;
10989 11007 mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
10990 11008 goto out;
10991 11009 }
10992 11010
10993 11011 mutex_exit(&mpt->m_mutex);
10994 11012 if (cmd->cmd_flags & CFLAG_PREPARED) {
10995 11013 function = request_hdrp->Function;
10996 11014 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10997 11015 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10998 11016 reply_len = sizeof (MPI2_SCSI_IO_REPLY);
10999 11017 sense_len = cmd->cmd_extrqslen ?
11000 11018 min(sense_len, cmd->cmd_extrqslen) :
11001 11019 min(sense_len, cmd->cmd_rqslen);
11002 11020 } else {
11003 11021 reply_len = reply_size;
11004 11022 sense_len = 0;
11005 11023 }
11006 11024
11007 11025 for (i = 0; i < reply_len; i++) {
11008 11026 if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
11009 11027 mode)) {
11010 11028 mutex_enter(&mpt->m_mutex);
11011 11029 status = EFAULT;
11012 11030 mptsas_log(mpt, CE_WARN, "failed to copy out "
11013 11031 "reply data");
11014 11032 goto out;
11015 11033 }
11016 11034 }
11017 11035 for (i = 0; i < sense_len; i++) {
11018 11036 if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
11019 11037 reply + reply_len + i, 1, mode)) {
11020 11038 mutex_enter(&mpt->m_mutex);
11021 11039 status = EFAULT;
11022 11040 mptsas_log(mpt, CE_WARN, "failed to copy out "
11023 11041 "sense data");
11024 11042 goto out;
11025 11043 }
11026 11044 }
11027 11045 }
11028 11046
11029 11047 if (data_size) {
11030 11048 if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
11031 11049 (void) ddi_dma_sync(data_dma_state.handle, 0, 0,
11032 11050 DDI_DMA_SYNC_FORCPU);
11033 11051 for (i = 0; i < data_size; i++) {
11034 11052 if (ddi_copyout((uint8_t *)(
11035 11053 data_dma_state.memp + i), data + i, 1,
11036 11054 mode)) {
11037 11055 mutex_enter(&mpt->m_mutex);
11038 11056 status = EFAULT;
11039 11057 mptsas_log(mpt, CE_WARN, "failed to "
11040 11058 "copy out the reply data");
11041 11059 goto out;
11042 11060 }
11043 11061 }
11044 11062 }
11045 11063 }
11046 11064 mutex_enter(&mpt->m_mutex);
11047 11065 out:
11048 11066 /*
11049 11067 * Put the reply frame back on the free queue, increment the free
11050 11068 * index, and write the new index to the free index register. But only
11051 11069 * if this reply is an ADDRESS reply.
11052 11070 */
11053 11071 if (pt_flags & MPTSAS_ADDRESS_REPLY) {
11054 11072 ddi_put32(mpt->m_acc_free_queue_hdl,
11055 11073 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
11056 11074 cmd->cmd_rfm);
11057 11075 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11058 11076 DDI_DMA_SYNC_FORDEV);
11059 11077 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
11060 11078 mpt->m_free_index = 0;
11061 11079 }
11062 11080 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
11063 11081 mpt->m_free_index);
11064 11082 }
11065 11083 if (cmd) {
11066 11084 if (cmd->cmd_extrqslen != 0) {
11067 11085 rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
11068 11086 cmd->cmd_extrqsidx + 1);
11069 11087 }
11070 11088 if (cmd->cmd_flags & CFLAG_PREPARED) {
11071 11089 mptsas_remove_cmd(mpt, cmd);
11072 11090 pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
11073 11091 }
11074 11092 }
11075 11093 if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
11076 11094 mptsas_return_to_pool(mpt, cmd);
11077 11095 if (pt_flags & MPTSAS_DATA_ALLOCATED) {
11078 11096 if (mptsas_check_dma_handle(data_dma_state.handle) !=
11079 11097 DDI_SUCCESS) {
11080 11098 ddi_fm_service_impact(mpt->m_dip,
11081 11099 DDI_SERVICE_UNAFFECTED);
11082 11100 status = EFAULT;
11083 11101 }
11084 11102 mptsas_dma_free(&data_dma_state);
11085 11103 }
11086 11104 if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
11087 11105 if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
11088 11106 DDI_SUCCESS) {
11089 11107 ddi_fm_service_impact(mpt->m_dip,
11090 11108 DDI_SERVICE_UNAFFECTED);
11091 11109 status = EFAULT;
11092 11110 }
11093 11111 mptsas_dma_free(&dataout_dma_state);
11094 11112 }
11095 11113 if (pt_flags & MPTSAS_CMD_TIMEOUT) {
11096 11114 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
11097 11115 mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
11098 11116 }
11099 11117 }
11100 11118 if (request_msg)
11101 11119 kmem_free(request_msg, request_size);
11102 11120 NDBG27(("mptsas_do_passthru: Done status 0x%x", status));
11103 11121
11104 11122 return (status);
11105 11123 }
11106 11124
11107 11125 static int
11108 11126 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
11109 11127 {
11110 11128 /*
11111 11129 * If timeout is 0, set timeout to default of 60 seconds.
11112 11130 */
11113 11131 if (data->Timeout == 0) {
11114 11132 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
11115 11133 }
11116 11134
11117 11135 if (((data->DataSize == 0) &&
11118 11136 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
11119 11137 ((data->DataSize != 0) &&
11120 11138 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
11121 11139 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
11122 11140 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
11123 11141 (data->DataOutSize != 0))))) {
11124 11142 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
11125 11143 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
11126 11144 } else {
11127 11145 data->DataOutSize = 0;
11128 11146 }
11129 11147 /*
11130 11148 * Send passthru request messages
11131 11149 */
11132 11150 return (mptsas_do_passthru(mpt,
11133 11151 (uint8_t *)((uintptr_t)data->PtrRequest),
11134 11152 (uint8_t *)((uintptr_t)data->PtrReply),
11135 11153 (uint8_t *)((uintptr_t)data->PtrData),
11136 11154 data->RequestSize, data->ReplySize,
11137 11155 data->DataSize, data->DataDirection,
11138 11156 (uint8_t *)((uintptr_t)data->PtrDataOut),
11139 11157 data->DataOutSize, data->Timeout, mode));
11140 11158 } else {
11141 11159 return (EINVAL);
11142 11160 }
11143 11161 }
11144 11162
11145 11163 static uint8_t
11146 11164 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
11147 11165 {
11148 11166 uint8_t index;
11149 11167
11150 11168 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
11151 11169 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
11152 11170 return (index);
11153 11171 }
11154 11172 }
11155 11173
11156 11174 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
11157 11175 }
11158 11176
/*
 * Build a firmware diagnostic request (DIAG_BUFFER_POST or DIAG_RELEASE,
 * selected by diag->function) in the request frame belonging to cmd's
 * slot, then write the request descriptor to hand it to the IOC.
 * Caller must hold m_mutex; completion arrives through the normal
 * reply path.
 */
static void
mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
{
	pMpi2DiagBufferPostRequest_t pDiag_post_msg;
	pMpi2DiagReleaseRequest_t pDiag_release_msg;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	mptsas_diag_request_t *diag = pkt->pkt_ha_private;
	uint32_t i;
	uint64_t request_desc;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Form the diag message depending on the post or release function.
	 */
	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
		/*
		 * Buffer post: describe the buffer (type, extended type,
		 * length, product-specific words) and its DMA address.
		 * All fields go through ddi_put*() so the frame is
		 * written with the device's required byte order.
		 */
		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_post_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
		    diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
		    diag->pBuffer->buffer_type);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->ExtendedType,
		    diag->pBuffer->extended_type);
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferLength,
		    diag->pBuffer->buffer_data.size);
		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
		    i++) {
			ddi_put32(mpt->m_acc_req_frame_hdl,
			    &pDiag_post_msg->ProductSpecific[i],
			    diag->pBuffer->product_specific[i]);
		}
		/* Split the 64-bit DMA address into low/high halves. */
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.Low,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    & 0xffffffffull));
		ddi_put32(mpt->m_acc_req_frame_hdl,
		    &pDiag_post_msg->BufferAddress.High,
		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
		    >> 32));
	} else {
		/*
		 * Buffer release: only the function code and buffer type
		 * are needed.
		 */
		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
		    (mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot));
		bzero(pDiag_release_msg, mpt->m_req_frame_size);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->Function, diag->function);
		ddi_put8(mpt->m_acc_req_frame_hdl,
		    &pDiag_release_msg->BufferType,
		    diag->pBuffer->buffer_type);
	}

	/*
	 * Send the message
	 */
	/* Flush the frame to the device before ringing the doorbell. */
	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	/* Default-type request descriptor carries the slot in bits 16+. */
	request_desc = (cmd->cmd_slot << 16) +
	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cmd->cmd_rfm = NULL;
	MPTSAS_START_CMD(mpt, request_desc);
	/* FMA: verify the request-frame DMA/access handles are healthy. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
	}
}
11231 11249
/*
 * Post a firmware diagnostic buffer to the IOC and wait for the reply.
 * On success the buffer is marked valid and owned by firmware.  Returns
 * DDI_SUCCESS/DDI_FAILURE; *return_code carries the finer-grained
 * MPTSAS_FW_DIAG_ERROR_* code for the caller's ioctl reply.  Called
 * with m_mutex held (it sleeps on m_fw_diag_cv).
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t diag;
	int status, slot_num, post_flags = 0;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;
	pMpi2DiagBufferPostReply_t reply;
	uint16_t iocstatus;
	uint32_t iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	/* post_flags tracks acquired resources for cleanup at "out". */
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* mptsas_start_diag() reads these via pkt_ha_private. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		/* No slot free yet; queue it for later dispatch. */
		mptsas_waitq_add(mpt, cmd);
	}

	/* Sleep until the completion path sets CFLAG_FINISHED. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		/* cmd_rfm is a device address; convert to a frame offset. */
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* mptsas_remove_cmd() also returns the cmd to the pool. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11382 11400
/*
 * Ask firmware to release a previously posted diagnostic buffer and
 * wait for the reply.  On success the buffer's data becomes available
 * to the host; for UNREGISTER requests the unique ID is also cleared.
 * Returns DDI_SUCCESS/DDI_FAILURE; *return_code carries the
 * MPTSAS_FW_DIAG_ERROR_* code for the caller's ioctl reply.  Called
 * with m_mutex held (it sleeps on m_fw_diag_cv).
 */
static int
mptsas_release_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
    uint32_t diag_type)
{
	mptsas_diag_request_t diag;
	int status, slot_num, rel_flags = 0;
	mptsas_cmd_t *cmd = NULL;
	struct scsi_pkt *pkt;
	pMpi2DiagReleaseReply_t reply;
	uint16_t iocstatus;
	uint32_t iocloginfo;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
	if (!pBuffer->enabled) {
		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
		    "by the IOC");
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
		    "Diag");
		goto out;
	}
	/* rel_flags tracks acquired resources for cleanup at "out". */
	rel_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/* mptsas_start_diag() reads these via pkt_ha_private. */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_RELEASE;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		/* No slot free yet; queue it for later dispatch. */
		mptsas_waitq_add(mpt, cmd);
	}

	/* Sleep until the completion path sets CFLAG_FINISHED. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		rel_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		/* cmd_rfm is a device address; convert to a frame offset. */
		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm -
		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);

		/*
		 * If release failed quit.
		 */
		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
		    pBuffer->owned_by_firmware) {
			status = DDI_FAILURE;
			NDBG13(("release FW Diag Buffer failed: "
			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
			    iocloginfo));
			goto out;
		}

		/*
		 * Release was successful.
		 */
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;

		/*
		 * If this was for an UNREGISTER diag type command, clear the
		 * unique ID.
		 */
		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
		}
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/* mptsas_remove_cmd() also returns the cmd to the pool. */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11542 11560
11543 11561 static int
11544 11562 mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
11545 11563 uint32_t *return_code)
11546 11564 {
11547 11565 mptsas_fw_diagnostic_buffer_t *pBuffer;
11548 11566 uint8_t extended_type, buffer_type, i;
11549 11567 uint32_t buffer_size;
11550 11568 uint32_t unique_id;
11551 11569 int status;
11552 11570
11553 11571 ASSERT(mutex_owned(&mpt->m_mutex));
11554 11572
11555 11573 extended_type = diag_register->ExtendedType;
11556 11574 buffer_type = diag_register->BufferType;
11557 11575 buffer_size = diag_register->RequestedBufferSize;
11558 11576 unique_id = diag_register->UniqueId;
11559 11577
11560 11578 /*
11561 11579 * Check for valid buffer type
11562 11580 */
11563 11581 if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
11564 11582 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11565 11583 return (DDI_FAILURE);
11566 11584 }
11567 11585
11568 11586 /*
11569 11587 * Get the current buffer and look up the unique ID. The unique ID
11570 11588 * should not be found. If it is, the ID is already in use.
11571 11589 */
11572 11590 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11573 11591 pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
11574 11592 if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11575 11593 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11576 11594 return (DDI_FAILURE);
11577 11595 }
11578 11596
11579 11597 /*
11580 11598 * The buffer's unique ID should not be registered yet, and the given
11581 11599 * unique ID cannot be 0.
11582 11600 */
11583 11601 if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
11584 11602 (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
11585 11603 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11586 11604 return (DDI_FAILURE);
11587 11605 }
11588 11606
11589 11607 /*
11590 11608 * If this buffer is already posted as immediate, just change owner.
11591 11609 */
11592 11610 if (pBuffer->immediate && pBuffer->owned_by_firmware &&
11593 11611 (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
11594 11612 pBuffer->immediate = FALSE;
11595 11613 pBuffer->unique_id = unique_id;
11596 11614 return (DDI_SUCCESS);
11597 11615 }
11598 11616
11599 11617 /*
11600 11618 * Post a new buffer after checking if it's enabled. The DMA buffer
11601 11619 * that is allocated will be contiguous (sgl_len = 1).
11602 11620 */
11603 11621 if (!pBuffer->enabled) {
11604 11622 *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
11605 11623 return (DDI_FAILURE);
11606 11624 }
11607 11625 bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
11608 11626 pBuffer->buffer_data.size = buffer_size;
11609 11627 if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
11610 11628 mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
11611 11629 "diag buffer: size = %d bytes", buffer_size);
11612 11630 *return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
11613 11631 return (DDI_FAILURE);
11614 11632 }
11615 11633
11616 11634 /*
11617 11635 * Copy the given info to the diag buffer and post the buffer.
11618 11636 */
11619 11637 pBuffer->buffer_type = buffer_type;
11620 11638 pBuffer->immediate = FALSE;
11621 11639 if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
11622 11640 for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
11623 11641 i++) {
11624 11642 pBuffer->product_specific[i] =
11625 11643 diag_register->ProductSpecific[i];
11626 11644 }
11627 11645 }
11628 11646 pBuffer->extended_type = extended_type;
11629 11647 pBuffer->unique_id = unique_id;
11630 11648 status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);
11631 11649
11632 11650 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
11633 11651 DDI_SUCCESS) {
11634 11652 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
11635 11653 "mptsas_diag_register.");
11636 11654 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
11637 11655 status = DDI_FAILURE;
11638 11656 }
11639 11657
11640 11658 /*
11641 11659 * In case there was a failure, free the DMA buffer.
11642 11660 */
11643 11661 if (status == DDI_FAILURE) {
11644 11662 mptsas_dma_free(&pBuffer->buffer_data);
11645 11663 }
11646 11664
11647 11665 return (status);
11648 11666 }
11649 11667
11650 11668 static int
11651 11669 mptsas_diag_unregister(mptsas_t *mpt,
11652 11670 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
11653 11671 {
11654 11672 mptsas_fw_diagnostic_buffer_t *pBuffer;
11655 11673 uint8_t i;
11656 11674 uint32_t unique_id;
11657 11675 int status;
11658 11676
11659 11677 ASSERT(mutex_owned(&mpt->m_mutex));
11660 11678
11661 11679 unique_id = diag_unregister->UniqueId;
11662 11680
11663 11681 /*
11664 11682 * Get the current buffer and look up the unique ID. The unique ID
11665 11683 * should be there.
11666 11684 */
11667 11685 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11668 11686 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11669 11687 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11670 11688 return (DDI_FAILURE);
11671 11689 }
11672 11690
11673 11691 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11674 11692
11675 11693 /*
11676 11694 * Try to release the buffer from FW before freeing it. If release
11677 11695 * fails, don't free the DMA buffer in case FW tries to access it
11678 11696 * later. If buffer is not owned by firmware, can't release it.
11679 11697 */
11680 11698 if (!pBuffer->owned_by_firmware) {
11681 11699 status = DDI_SUCCESS;
11682 11700 } else {
11683 11701 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
11684 11702 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
11685 11703 }
11686 11704
11687 11705 /*
11688 11706 * At this point, return the current status no matter what happens with
11689 11707 * the DMA buffer.
11690 11708 */
11691 11709 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
11692 11710 if (status == DDI_SUCCESS) {
11693 11711 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
11694 11712 DDI_SUCCESS) {
11695 11713 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
11696 11714 "in mptsas_diag_unregister.");
11697 11715 ddi_fm_service_impact(mpt->m_dip,
11698 11716 DDI_SERVICE_UNAFFECTED);
11699 11717 }
11700 11718 mptsas_dma_free(&pBuffer->buffer_data);
11701 11719 }
11702 11720
11703 11721 return (status);
11704 11722 }
11705 11723
11706 11724 static int
11707 11725 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
11708 11726 uint32_t *return_code)
11709 11727 {
11710 11728 mptsas_fw_diagnostic_buffer_t *pBuffer;
11711 11729 uint8_t i;
11712 11730 uint32_t unique_id;
11713 11731
11714 11732 ASSERT(mutex_owned(&mpt->m_mutex));
11715 11733
11716 11734 unique_id = diag_query->UniqueId;
11717 11735
11718 11736 /*
11719 11737 * If ID is valid, query on ID.
11720 11738 * If ID is invalid, query on buffer type.
11721 11739 */
11722 11740 if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
11723 11741 i = diag_query->BufferType;
11724 11742 if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
11725 11743 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11726 11744 return (DDI_FAILURE);
11727 11745 }
11728 11746 } else {
11729 11747 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11730 11748 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11731 11749 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11732 11750 return (DDI_FAILURE);
11733 11751 }
11734 11752 }
11735 11753
11736 11754 /*
11737 11755 * Fill query structure with the diag buffer info.
11738 11756 */
11739 11757 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11740 11758 diag_query->BufferType = pBuffer->buffer_type;
11741 11759 diag_query->ExtendedType = pBuffer->extended_type;
11742 11760 if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
11743 11761 for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
11744 11762 i++) {
11745 11763 diag_query->ProductSpecific[i] =
11746 11764 pBuffer->product_specific[i];
11747 11765 }
11748 11766 }
11749 11767 diag_query->TotalBufferSize = pBuffer->buffer_data.size;
11750 11768 diag_query->DriverAddedBufferSize = 0;
11751 11769 diag_query->UniqueId = pBuffer->unique_id;
11752 11770 diag_query->ApplicationFlags = 0;
11753 11771 diag_query->DiagnosticFlags = 0;
11754 11772
11755 11773 /*
11756 11774 * Set/Clear application flags
11757 11775 */
11758 11776 if (pBuffer->immediate) {
11759 11777 diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11760 11778 } else {
11761 11779 diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
11762 11780 }
11763 11781 if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
11764 11782 diag_query->ApplicationFlags |=
11765 11783 MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11766 11784 } else {
11767 11785 diag_query->ApplicationFlags &=
11768 11786 ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
11769 11787 }
11770 11788 if (pBuffer->owned_by_firmware) {
11771 11789 diag_query->ApplicationFlags |=
11772 11790 MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11773 11791 } else {
11774 11792 diag_query->ApplicationFlags &=
11775 11793 ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
11776 11794 }
11777 11795
11778 11796 return (DDI_SUCCESS);
11779 11797 }
11780 11798
11781 11799 static int
11782 11800 mptsas_diag_read_buffer(mptsas_t *mpt,
11783 11801 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
11784 11802 uint32_t *return_code, int ioctl_mode)
11785 11803 {
11786 11804 mptsas_fw_diagnostic_buffer_t *pBuffer;
11787 11805 uint8_t i, *pData;
11788 11806 uint32_t unique_id, byte;
11789 11807 int status;
11790 11808
11791 11809 ASSERT(mutex_owned(&mpt->m_mutex));
11792 11810
11793 11811 unique_id = diag_read_buffer->UniqueId;
11794 11812
11795 11813 /*
11796 11814 * Get the current buffer and look up the unique ID. The unique ID
11797 11815 * should be there.
11798 11816 */
11799 11817 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11800 11818 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11801 11819 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11802 11820 return (DDI_FAILURE);
11803 11821 }
11804 11822
11805 11823 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11806 11824
11807 11825 /*
11808 11826 * Make sure requested read is within limits
11809 11827 */
11810 11828 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
11811 11829 pBuffer->buffer_data.size) {
11812 11830 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11813 11831 return (DDI_FAILURE);
11814 11832 }
11815 11833
11816 11834 /*
11817 11835 * Copy the requested data from DMA to the diag_read_buffer. The DMA
11818 11836 * buffer that was allocated is one contiguous buffer.
11819 11837 */
11820 11838 pData = (uint8_t *)(pBuffer->buffer_data.memp +
11821 11839 diag_read_buffer->StartingOffset);
11822 11840 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
11823 11841 DDI_DMA_SYNC_FORCPU);
11824 11842 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
11825 11843 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
11826 11844 != 0) {
11827 11845 return (DDI_FAILURE);
11828 11846 }
11829 11847 }
11830 11848 diag_read_buffer->Status = 0;
11831 11849
11832 11850 /*
11833 11851 * Set or clear the Force Release flag.
11834 11852 */
11835 11853 if (pBuffer->force_release) {
11836 11854 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11837 11855 } else {
11838 11856 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11839 11857 }
11840 11858
11841 11859 /*
11842 11860 * If buffer is to be reregistered, make sure it's not already owned by
11843 11861 * firmware first.
11844 11862 */
11845 11863 status = DDI_SUCCESS;
11846 11864 if (!pBuffer->owned_by_firmware) {
11847 11865 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
11848 11866 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
11849 11867 return_code);
11850 11868 }
11851 11869 }
11852 11870
11853 11871 return (status);
11854 11872 }
11855 11873
11856 11874 static int
11857 11875 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
11858 11876 uint32_t *return_code)
11859 11877 {
11860 11878 mptsas_fw_diagnostic_buffer_t *pBuffer;
11861 11879 uint8_t i;
11862 11880 uint32_t unique_id;
11863 11881 int status;
11864 11882
11865 11883 ASSERT(mutex_owned(&mpt->m_mutex));
11866 11884
11867 11885 unique_id = diag_release->UniqueId;
11868 11886
11869 11887 /*
11870 11888 * Get the current buffer and look up the unique ID. The unique ID
11871 11889 * should be there.
11872 11890 */
11873 11891 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11874 11892 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11875 11893 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11876 11894 return (DDI_FAILURE);
11877 11895 }
11878 11896
11879 11897 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11880 11898
11881 11899 /*
11882 11900 * If buffer is not owned by firmware, it's already been released.
11883 11901 */
11884 11902 if (!pBuffer->owned_by_firmware) {
11885 11903 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
11886 11904 return (DDI_FAILURE);
11887 11905 }
11888 11906
11889 11907 /*
11890 11908 * Release the buffer.
11891 11909 */
11892 11910 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
11893 11911 MPTSAS_FW_DIAG_TYPE_RELEASE);
11894 11912 return (status);
11895 11913 }
11896 11914
11897 11915 static int
11898 11916 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
11899 11917 uint32_t length, uint32_t *return_code, int ioctl_mode)
11900 11918 {
11901 11919 mptsas_fw_diag_register_t diag_register;
11902 11920 mptsas_fw_diag_unregister_t diag_unregister;
11903 11921 mptsas_fw_diag_query_t diag_query;
11904 11922 mptsas_diag_read_buffer_t diag_read_buffer;
11905 11923 mptsas_fw_diag_release_t diag_release;
11906 11924 int status = DDI_SUCCESS;
11907 11925 uint32_t original_return_code, read_buf_len;
11908 11926
11909 11927 ASSERT(mutex_owned(&mpt->m_mutex));
11910 11928
11911 11929 original_return_code = *return_code;
11912 11930 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11913 11931
11914 11932 switch (action) {
11915 11933 case MPTSAS_FW_DIAG_TYPE_REGISTER:
11916 11934 if (!length) {
11917 11935 *return_code =
11918 11936 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11919 11937 status = DDI_FAILURE;
11920 11938 break;
11921 11939 }
11922 11940 if (ddi_copyin(diag_action, &diag_register,
11923 11941 sizeof (diag_register), ioctl_mode) != 0) {
11924 11942 return (DDI_FAILURE);
11925 11943 }
11926 11944 status = mptsas_diag_register(mpt, &diag_register,
11927 11945 return_code);
11928 11946 break;
11929 11947
11930 11948 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
11931 11949 if (length < sizeof (diag_unregister)) {
11932 11950 *return_code =
11933 11951 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11934 11952 status = DDI_FAILURE;
11935 11953 break;
11936 11954 }
11937 11955 if (ddi_copyin(diag_action, &diag_unregister,
11938 11956 sizeof (diag_unregister), ioctl_mode) != 0) {
11939 11957 return (DDI_FAILURE);
11940 11958 }
11941 11959 status = mptsas_diag_unregister(mpt, &diag_unregister,
11942 11960 return_code);
11943 11961 break;
11944 11962
11945 11963 case MPTSAS_FW_DIAG_TYPE_QUERY:
11946 11964 if (length < sizeof (diag_query)) {
11947 11965 *return_code =
11948 11966 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11949 11967 status = DDI_FAILURE;
11950 11968 break;
11951 11969 }
11952 11970 if (ddi_copyin(diag_action, &diag_query,
11953 11971 sizeof (diag_query), ioctl_mode) != 0) {
11954 11972 return (DDI_FAILURE);
11955 11973 }
11956 11974 status = mptsas_diag_query(mpt, &diag_query,
11957 11975 return_code);
11958 11976 if (status == DDI_SUCCESS) {
11959 11977 if (ddi_copyout(&diag_query, diag_action,
11960 11978 sizeof (diag_query), ioctl_mode) != 0) {
11961 11979 return (DDI_FAILURE);
11962 11980 }
11963 11981 }
11964 11982 break;
11965 11983
11966 11984 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
11967 11985 if (ddi_copyin(diag_action, &diag_read_buffer,
11968 11986 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
11969 11987 return (DDI_FAILURE);
11970 11988 }
11971 11989 read_buf_len = sizeof (diag_read_buffer) -
11972 11990 sizeof (diag_read_buffer.DataBuffer) +
11973 11991 diag_read_buffer.BytesToRead;
11974 11992 if (length < read_buf_len) {
11975 11993 *return_code =
11976 11994 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11977 11995 status = DDI_FAILURE;
11978 11996 break;
11979 11997 }
11980 11998 status = mptsas_diag_read_buffer(mpt,
11981 11999 &diag_read_buffer, diag_action +
11982 12000 sizeof (diag_read_buffer) - 4, return_code,
11983 12001 ioctl_mode);
11984 12002 if (status == DDI_SUCCESS) {
11985 12003 if (ddi_copyout(&diag_read_buffer, diag_action,
11986 12004 sizeof (diag_read_buffer) - 4, ioctl_mode)
11987 12005 != 0) {
11988 12006 return (DDI_FAILURE);
11989 12007 }
11990 12008 }
11991 12009 break;
11992 12010
11993 12011 case MPTSAS_FW_DIAG_TYPE_RELEASE:
11994 12012 if (length < sizeof (diag_release)) {
11995 12013 *return_code =
11996 12014 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11997 12015 status = DDI_FAILURE;
11998 12016 break;
11999 12017 }
12000 12018 if (ddi_copyin(diag_action, &diag_release,
12001 12019 sizeof (diag_release), ioctl_mode) != 0) {
12002 12020 return (DDI_FAILURE);
12003 12021 }
12004 12022 status = mptsas_diag_release(mpt, &diag_release,
12005 12023 return_code);
12006 12024 break;
12007 12025
12008 12026 default:
12009 12027 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12010 12028 status = DDI_FAILURE;
12011 12029 break;
12012 12030 }
12013 12031
12014 12032 if ((status == DDI_FAILURE) &&
12015 12033 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
12016 12034 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
12017 12035 status = DDI_SUCCESS;
12018 12036 }
12019 12037
12020 12038 return (status);
12021 12039 }
12022 12040
12023 12041 static int
12024 12042 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
12025 12043 {
12026 12044 int status;
12027 12045 mptsas_diag_action_t driver_data;
12028 12046
12029 12047 ASSERT(mutex_owned(&mpt->m_mutex));
12030 12048
12031 12049 /*
12032 12050 * Copy the user data to a driver data buffer.
12033 12051 */
12034 12052 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
12035 12053 mode) == 0) {
12036 12054 /*
12037 12055 * Send diag action request if Action is valid
12038 12056 */
12039 12057 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
12040 12058 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
12041 12059 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
12042 12060 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
12043 12061 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
12044 12062 status = mptsas_do_diag_action(mpt, driver_data.Action,
12045 12063 (void *)(uintptr_t)driver_data.PtrDiagAction,
12046 12064 driver_data.Length, &driver_data.ReturnCode,
12047 12065 mode);
12048 12066 if (status == DDI_SUCCESS) {
12049 12067 if (ddi_copyout(&driver_data.ReturnCode,
12050 12068 &user_data->ReturnCode,
12051 12069 sizeof (user_data->ReturnCode), mode)
12052 12070 != 0) {
12053 12071 status = EFAULT;
12054 12072 } else {
12055 12073 status = 0;
12056 12074 }
12057 12075 } else {
12058 12076 status = EIO;
12059 12077 }
12060 12078 } else {
12061 12079 status = EINVAL;
12062 12080 }
12063 12081 } else {
12064 12082 status = EFAULT;
12065 12083 }
12066 12084
12067 12085 return (status);
12068 12086 }
12069 12087
12070 12088 /*
12071 12089 * This routine handles the "event query" ioctl.
12072 12090 */
12073 12091 static int
12074 12092 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
12075 12093 int *rval)
12076 12094 {
12077 12095 int status;
12078 12096 mptsas_event_query_t driverdata;
12079 12097 uint8_t i;
12080 12098
12081 12099 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
12082 12100
12083 12101 mutex_enter(&mpt->m_mutex);
12084 12102 for (i = 0; i < 4; i++) {
12085 12103 driverdata.Types[i] = mpt->m_event_mask[i];
12086 12104 }
12087 12105 mutex_exit(&mpt->m_mutex);
12088 12106
12089 12107 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
12090 12108 status = EFAULT;
12091 12109 } else {
12092 12110 *rval = MPTIOCTL_STATUS_GOOD;
12093 12111 status = 0;
12094 12112 }
12095 12113
12096 12114 return (status);
12097 12115 }
12098 12116
12099 12117 /*
12100 12118 * This routine handles the "event enable" ioctl.
12101 12119 */
12102 12120 static int
12103 12121 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
12104 12122 int *rval)
12105 12123 {
12106 12124 int status;
12107 12125 mptsas_event_enable_t driverdata;
12108 12126 uint8_t i;
12109 12127
12110 12128 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12111 12129 mutex_enter(&mpt->m_mutex);
12112 12130 for (i = 0; i < 4; i++) {
12113 12131 mpt->m_event_mask[i] = driverdata.Types[i];
12114 12132 }
12115 12133 mutex_exit(&mpt->m_mutex);
12116 12134
12117 12135 *rval = MPTIOCTL_STATUS_GOOD;
12118 12136 status = 0;
12119 12137 } else {
12120 12138 status = EFAULT;
12121 12139 }
12122 12140 return (status);
12123 12141 }
12124 12142
12125 12143 /*
12126 12144 * This routine handles the "event report" ioctl.
12127 12145 */
/*
 * Copy the driver's cached event log (mpt->m_events) out to the caller.
 * The caller's Size field must claim at least sizeof (mpt->m_events);
 * otherwise *rval is set to MPTIOCTL_STATUS_LEN_TOO_SHORT and 0 is
 * returned.  If the caller claimed more, the actual size is written back.
 *
 * NOTE(review): ddi_copyin/ddi_copyout run while m_mutex is held here —
 * presumably to keep the event log stable during the copy; confirm this
 * lock ordering is acceptable for this mutex.
 */
static int
mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
    int *rval)
{
	int			status;
	mptsas_event_report_t	driverdata;

	mutex_enter(&mpt->m_mutex);

	/* Read only the Size field of the caller's request. */
	if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
	    mode) == 0) {
		/* Caller's buffer must hold the whole event log. */
		if (driverdata.Size >= sizeof (mpt->m_events)) {
			if (ddi_copyout(mpt->m_events, data->Events,
			    sizeof (mpt->m_events), mode) != 0) {
				status = EFAULT;
			} else {
				/*
				 * If the claimed size was larger than the
				 * log, report back how much was actually
				 * copied.
				 */
				if (driverdata.Size > sizeof (mpt->m_events)) {
					driverdata.Size =
					    sizeof (mpt->m_events);
					if (ddi_copyout(&driverdata.Size,
					    &data->Size,
					    sizeof (driverdata.Size),
					    mode) != 0) {
						status = EFAULT;
					} else {
						*rval = MPTIOCTL_STATUS_GOOD;
						status = 0;
					}
				} else {
					*rval = MPTIOCTL_STATUS_GOOD;
					status = 0;
				}
			}
		} else {
			/* Buffer too small: not an error, just flag it. */
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
			status = 0;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
12172 12190
12173 12191 static void
12174 12192 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12175 12193 {
12176 12194 int *reg_data;
12177 12195 uint_t reglen;
12178 12196
12179 12197 /*
12180 12198 * Lookup the 'reg' property and extract the other data
12181 12199 */
12182 12200 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12183 12201 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
12184 12202 DDI_PROP_SUCCESS) {
12185 12203 /*
12186 12204 * Extract the PCI data from the 'reg' property first DWORD.
12187 12205 * The entry looks like the following:
12188 12206 * First DWORD:
12189 12207 * Bits 0 - 7 8-bit Register number
12190 12208 * Bits 8 - 10 3-bit Function number
12191 12209 * Bits 11 - 15 5-bit Device number
12192 12210 * Bits 16 - 23 8-bit Bus number
12193 12211 * Bits 24 - 25 2-bit Address Space type identifier
12194 12212 *
12195 12213 */
12196 12214 adapter_data->PciInformation.u.bits.BusNumber =
12197 12215 (reg_data[0] & 0x00FF0000) >> 16;
12198 12216 adapter_data->PciInformation.u.bits.DeviceNumber =
12199 12217 (reg_data[0] & 0x0000F800) >> 11;
12200 12218 adapter_data->PciInformation.u.bits.FunctionNumber =
12201 12219 (reg_data[0] & 0x00000700) >> 8;
12202 12220 ddi_prop_free((void *)reg_data);
12203 12221 } else {
12204 12222 /*
12205 12223 * If we can't determine the PCI data then we fill in FF's for
12206 12224 * the data to indicate this.
12207 12225 */
12208 12226 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
12209 12227 adapter_data->MpiPortNumber = 0xFFFFFFFF;
12210 12228 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
12211 12229 }
12212 12230
12213 12231 /*
12214 12232 * Saved in the mpt->m_fwversion
12215 12233 */
12216 12234 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
12217 12235 }
12218 12236
12219 12237 static void
12220 12238 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12221 12239 {
12222 12240 char *driver_verstr = MPTSAS_MOD_STRING;
12223 12241
12224 12242 mptsas_lookup_pci_data(mpt, adapter_data);
12225 12243 adapter_data->AdapterType = mpt->m_MPI25 ?
12226 12244 MPTIOCTL_ADAPTER_TYPE_SAS3 :
12227 12245 MPTIOCTL_ADAPTER_TYPE_SAS2;
12228 12246 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
12229 12247 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
12230 12248 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
12231 12249 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
12232 12250 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
12233 12251 adapter_data->BiosVersion = 0;
12234 12252 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
12235 12253 }
12236 12254
12237 12255 static void
12238 12256 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
12239 12257 {
12240 12258 int *reg_data, i;
12241 12259 uint_t reglen;
12242 12260
12243 12261 /*
12244 12262 * Lookup the 'reg' property and extract the other data
12245 12263 */
12246 12264 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12247 12265 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
12248 12266 DDI_PROP_SUCCESS) {
12249 12267 /*
12250 12268 * Extract the PCI data from the 'reg' property first DWORD.
12251 12269 * The entry looks like the following:
12252 12270 * First DWORD:
12253 12271 * Bits 8 - 10 3-bit Function number
12254 12272 * Bits 11 - 15 5-bit Device number
12255 12273 * Bits 16 - 23 8-bit Bus number
12256 12274 */
12257 12275 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
12258 12276 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
12259 12277 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
12260 12278 ddi_prop_free((void *)reg_data);
12261 12279 } else {
12262 12280 /*
12263 12281 * If we can't determine the PCI info then we fill in FF's for
12264 12282 * the data to indicate this.
12265 12283 */
12266 12284 pci_info->BusNumber = 0xFFFFFFFF;
12267 12285 pci_info->DeviceNumber = 0xFF;
12268 12286 pci_info->FunctionNumber = 0xFF;
12269 12287 }
12270 12288
12271 12289 /*
12272 12290 * Now get the interrupt vector and the pci header. The vector can
12273 12291 * only be 0 right now. The header is the first 256 bytes of config
12274 12292 * space.
12275 12293 */
12276 12294 pci_info->InterruptVector = 0;
12277 12295 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
12278 12296 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
12279 12297 i);
12280 12298 }
12281 12299 }
12282 12300
12283 12301 static int
12284 12302 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
12285 12303 {
12286 12304 int status = 0;
12287 12305 mptsas_reg_access_t driverdata;
12288 12306
12289 12307 mutex_enter(&mpt->m_mutex);
12290 12308 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12291 12309 switch (driverdata.Command) {
12292 12310 /*
12293 12311 * IO access is not supported.
12294 12312 */
12295 12313 case REG_IO_READ:
12296 12314 case REG_IO_WRITE:
12297 12315 mptsas_log(mpt, CE_WARN, "IO access is not "
12298 12316 "supported. Use memory access.");
12299 12317 status = EINVAL;
12300 12318 break;
12301 12319
12302 12320 case REG_MEM_READ:
12303 12321 driverdata.RegData = ddi_get32(mpt->m_datap,
12304 12322 (uint32_t *)(void *)mpt->m_reg +
12305 12323 driverdata.RegOffset);
12306 12324 if (ddi_copyout(&driverdata.RegData,
12307 12325 &data->RegData,
12308 12326 sizeof (driverdata.RegData), mode) != 0) {
12309 12327 mptsas_log(mpt, CE_WARN, "Register "
12310 12328 "Read Failed");
12311 12329 status = EFAULT;
12312 12330 }
12313 12331 break;
12314 12332
12315 12333 case REG_MEM_WRITE:
12316 12334 ddi_put32(mpt->m_datap,
12317 12335 (uint32_t *)(void *)mpt->m_reg +
12318 12336 driverdata.RegOffset,
12319 12337 driverdata.RegData);
12320 12338 break;
12321 12339
12322 12340 default:
12323 12341 status = EINVAL;
12324 12342 break;
12325 12343 }
12326 12344 } else {
12327 12345 status = EFAULT;
12328 12346 }
12329 12347
12330 12348 mutex_exit(&mpt->m_mutex);
12331 12349 return (status);
12332 12350 }
12333 12351
12334 12352 static int
12335 12353 led_control(mptsas_t *mpt, intptr_t data, int mode)
12336 12354 {
12337 12355 int ret = 0;
12338 12356 mptsas_led_control_t lc;
12339 12357 mptsas_target_t *ptgt;
12340 12358
12341 12359 if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
12342 12360 return (EFAULT);
12343 12361 }
12344 12362
12345 12363 if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
12346 12364 lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
12347 12365 lc.Led < MPTSAS_LEDCTL_LED_MIN ||
12348 12366 lc.Led > MPTSAS_LEDCTL_LED_MAX ||
12349 12367 (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
12350 12368 lc.LedStatus != 1)) {
12351 12369 return (EINVAL);
12352 12370 }
12353 12371
12354 12372 if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
12355 12373 (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
12356 12374 return (EACCES);
12357 12375
12358 12376 /* Locate the target we're interrogating... */
12359 12377 mutex_enter(&mpt->m_mutex);
12360 12378 ptgt = refhash_linear_search(mpt->m_targets,
12361 12379 mptsas_target_eval_slot, &lc);
12362 12380 if (ptgt == NULL) {
12363 12381 /* We could not find a target for that enclosure/slot. */
12364 12382 mutex_exit(&mpt->m_mutex);
12365 12383 return (ENOENT);
12366 12384 }
12367 12385
12368 12386 if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
12369 12387 /* Update our internal LED state. */
12370 12388 ptgt->m_led_status &= ~(1 << (lc.Led - 1));
12371 12389 ptgt->m_led_status |= lc.LedStatus << (lc.Led - 1);
12372 12390
12373 12391 /* Flush it to the controller. */
12374 12392 ret = mptsas_flush_led_status(mpt, ptgt);
12375 12393 mutex_exit(&mpt->m_mutex);
12376 12394 return (ret);
12377 12395 }
12378 12396
12379 12397 /* Return our internal LED state. */
12380 12398 lc.LedStatus = (ptgt->m_led_status >> (lc.Led - 1)) & 1;
12381 12399 mutex_exit(&mpt->m_mutex);
12382 12400
12383 12401 if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12384 12402 return (EFAULT);
12385 12403 }
12386 12404
12387 12405 return (0);
12388 12406 }
12389 12407
/*
 * Service the GET_DISK_INFO ioctl: report the number of targets this
 * instance knows about and, if the caller supplied a large enough array,
 * copy out one mptsas_disk_info_t (instance/enclosure/slot/WWN) per
 * target.  Uses the STRUCT_* macros so the request parses correctly for
 * both 32- and 64-bit callers.  Returns 0, EACCES, EFAULT, ENOSPC, or
 * EAGAIN (target list changed mid-scan).
 */
static int
get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
{
	uint16_t i = 0;
	uint16_t count = 0;
	int ret = 0;
	mptsas_target_t *ptgt;
	mptsas_disk_info_t *di;
	STRUCT_DECL(mptsas_get_disk_info, gdi);

	if ((mode & FREAD) == 0)
		return (EACCES);

	STRUCT_INIT(gdi, get_udatamodel());

	if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
	    mode) != 0) {
		return (EFAULT);
	}

	/* Find out how many targets there are. */
	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		count++;
	}
	mutex_exit(&mpt->m_mutex);

	/*
	 * If we haven't been asked to copy out information on each target,
	 * then just return the count.
	 */
	STRUCT_FSET(gdi, DiskCount, count);
	if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
		goto copy_out;

	/*
	 * If we haven't been given a large enough buffer to copy out into,
	 * let the caller know.
	 */
	if (STRUCT_FGET(gdi, DiskInfoArraySize) <
	    count * sizeof (mptsas_disk_info_t)) {
		ret = ENOSPC;
		goto copy_out;
	}

	di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);

	/*
	 * The mutex was dropped after the count, so the list may have
	 * changed; re-walk it and bail with EAGAIN if it grew.
	 */
	mutex_enter(&mpt->m_mutex);
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		if (i >= count) {
			/*
			 * The number of targets changed while we weren't
			 * looking, so give up.
			 */
			refhash_rele(mpt->m_targets, ptgt);
			mutex_exit(&mpt->m_mutex);
			kmem_free(di, count * sizeof (mptsas_disk_info_t));
			return (EAGAIN);
		}
		di[i].Instance = mpt->m_instance;
		di[i].Enclosure = ptgt->m_enclosure;
		di[i].Slot = ptgt->m_slot_num;
		di[i].SasAddress = ptgt->m_addr.mta_wwn;
		i++;
	}
	mutex_exit(&mpt->m_mutex);
	/* Report how many entries were actually filled (list may shrink). */
	STRUCT_FSET(gdi, DiskCount, i);

	/* Copy out the disk information to the caller. */
	if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
	    i * sizeof (mptsas_disk_info_t), mode) != 0) {
		ret = EFAULT;
	}

	kmem_free(di, count * sizeof (mptsas_disk_info_t));

copy_out:
	if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
	    mode) != 0) {
		ret = EFAULT;
	}

	return (ret);
}
12476 12494
12477 12495 static int
12478 12496 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
12479 12497 int *rval)
12480 12498 {
12481 12499 int status = 0;
12482 12500 mptsas_t *mpt;
12483 12501 mptsas_update_flash_t flashdata;
12484 12502 mptsas_pass_thru_t passthru_data;
12485 12503 mptsas_adapter_data_t adapter_data;
12486 12504 mptsas_pci_info_t pci_info;
12487 12505 int copylen;
12488 12506
12489 12507 int iport_flag = 0;
12490 12508 dev_info_t *dip = NULL;
12491 12509 mptsas_phymask_t phymask = 0;
12492 12510 struct devctl_iocdata *dcp = NULL;
12493 12511 char *addr = NULL;
12494 12512 mptsas_target_t *ptgt = NULL;
12495 12513
12496 12514 *rval = MPTIOCTL_STATUS_GOOD;
12497 12515 if (secpolicy_sys_config(credp, B_FALSE) != 0) {
12498 12516 return (EPERM);
12499 12517 }
12500 12518
12501 12519 mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
12502 12520 if (mpt == NULL) {
12503 12521 /*
12504 12522 * Called from iport node, get the states
12505 12523 */
12506 12524 iport_flag = 1;
12507 12525 dip = mptsas_get_dip_from_dev(dev, &phymask);
12508 12526 if (dip == NULL) {
12509 12527 return (ENXIO);
12510 12528 }
12511 12529 mpt = DIP2MPT(dip);
12512 12530 }
12513 12531 /* Make sure power level is D0 before accessing registers */
12514 12532 mutex_enter(&mpt->m_mutex);
12515 12533 if (mpt->m_options & MPTSAS_OPT_PM) {
12516 12534 (void) pm_busy_component(mpt->m_dip, 0);
12517 12535 if (mpt->m_power_level != PM_LEVEL_D0) {
12518 12536 mutex_exit(&mpt->m_mutex);
12519 12537 if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
12520 12538 DDI_SUCCESS) {
12521 12539 mptsas_log(mpt, CE_WARN,
12522 12540 "mptsas%d: mptsas_ioctl: Raise power "
12523 12541 "request failed.", mpt->m_instance);
12524 12542 (void) pm_idle_component(mpt->m_dip, 0);
12525 12543 return (ENXIO);
12526 12544 }
12527 12545 } else {
12528 12546 mutex_exit(&mpt->m_mutex);
12529 12547 }
12530 12548 } else {
12531 12549 mutex_exit(&mpt->m_mutex);
12532 12550 }
12533 12551
12534 12552 if (iport_flag) {
12535 12553 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
12536 12554 if (status != 0) {
12537 12555 goto out;
12538 12556 }
12539 12557 /*
12540 12558 * The following code control the OK2RM LED, it doesn't affect
12541 12559 * the ioctl return status.
12542 12560 */
12543 12561 if ((cmd == DEVCTL_DEVICE_ONLINE) ||
12544 12562 (cmd == DEVCTL_DEVICE_OFFLINE)) {
12545 12563 if (ndi_dc_allochdl((void *)data, &dcp) !=
12546 12564 NDI_SUCCESS) {
12547 12565 goto out;
12548 12566 }
12549 12567 addr = ndi_dc_getaddr(dcp);
12550 12568 ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
12551 12569 if (ptgt == NULL) {
12552 12570 NDBG14(("mptsas_ioctl led control: tgt %s not "
12553 12571 "found", addr));
12554 12572 ndi_dc_freehdl(dcp);
12555 12573 goto out;
12556 12574 }
12557 12575 mutex_enter(&mpt->m_mutex);
12558 12576 if (cmd == DEVCTL_DEVICE_ONLINE) {
12559 12577 ptgt->m_tgt_unconfigured = 0;
12560 12578 } else if (cmd == DEVCTL_DEVICE_OFFLINE) {
12561 12579 ptgt->m_tgt_unconfigured = 1;
12562 12580 }
12563 12581 if (cmd == DEVCTL_DEVICE_OFFLINE) {
12564 12582 ptgt->m_led_status |=
12565 12583 (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
12566 12584 } else {
12567 12585 ptgt->m_led_status &=
12568 12586 ~(1 << (MPTSAS_LEDCTL_LED_OK2RM - 1));
12569 12587 }
12570 12588 (void) mptsas_flush_led_status(mpt, ptgt);
12571 12589 mutex_exit(&mpt->m_mutex);
12572 12590 ndi_dc_freehdl(dcp);
12573 12591 }
12574 12592 goto out;
12575 12593 }
12576 12594 switch (cmd) {
12577 12595 case MPTIOCTL_GET_DISK_INFO:
12578 12596 status = get_disk_info(mpt, data, mode);
12579 12597 break;
12580 12598 case MPTIOCTL_LED_CONTROL:
12581 12599 status = led_control(mpt, data, mode);
12582 12600 break;
12583 12601 case MPTIOCTL_UPDATE_FLASH:
12584 12602 if (ddi_copyin((void *)data, &flashdata,
12585 12603 sizeof (struct mptsas_update_flash), mode)) {
12586 12604 status = EFAULT;
12587 12605 break;
12588 12606 }
12589 12607
12590 12608 mutex_enter(&mpt->m_mutex);
12591 12609 if (mptsas_update_flash(mpt,
12592 12610 (caddr_t)(long)flashdata.PtrBuffer,
12593 12611 flashdata.ImageSize, flashdata.ImageType, mode)) {
12594 12612 status = EFAULT;
12595 12613 }
12596 12614
12597 12615 /*
12598 12616 * Reset the chip to start using the new
12599 12617 * firmware. Reset if failed also.
12600 12618 */
12601 12619 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12602 12620 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
12603 12621 status = EFAULT;
12604 12622 }
12605 12623 mutex_exit(&mpt->m_mutex);
12606 12624 break;
12607 12625 case MPTIOCTL_PASS_THRU:
12608 12626 /*
12609 12627 * The user has requested to pass through a command to
12610 12628 * be executed by the MPT firmware. Call our routine
12611 12629 * which does this. Only allow one passthru IOCTL at
12612 12630 * one time. Other threads will block on
12613 12631 * m_passthru_mutex, which is of adaptive variant.
12614 12632 */
12615 12633 if (ddi_copyin((void *)data, &passthru_data,
12616 12634 sizeof (mptsas_pass_thru_t), mode)) {
12617 12635 status = EFAULT;
12618 12636 break;
12619 12637 }
12620 12638 mutex_enter(&mpt->m_passthru_mutex);
12621 12639 mutex_enter(&mpt->m_mutex);
12622 12640 status = mptsas_pass_thru(mpt, &passthru_data, mode);
12623 12641 mutex_exit(&mpt->m_mutex);
12624 12642 mutex_exit(&mpt->m_passthru_mutex);
12625 12643
12626 12644 break;
12627 12645 case MPTIOCTL_GET_ADAPTER_DATA:
12628 12646 /*
12629 12647 * The user has requested to read adapter data. Call
12630 12648 * our routine which does this.
12631 12649 */
12632 12650 bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
12633 12651 if (ddi_copyin((void *)data, (void *)&adapter_data,
12634 12652 sizeof (mptsas_adapter_data_t), mode)) {
12635 12653 status = EFAULT;
12636 12654 break;
12637 12655 }
12638 12656 if (adapter_data.StructureLength >=
12639 12657 sizeof (mptsas_adapter_data_t)) {
12640 12658 adapter_data.StructureLength = (uint32_t)
12641 12659 sizeof (mptsas_adapter_data_t);
12642 12660 copylen = sizeof (mptsas_adapter_data_t);
12643 12661 mutex_enter(&mpt->m_mutex);
12644 12662 mptsas_read_adapter_data(mpt, &adapter_data);
12645 12663 mutex_exit(&mpt->m_mutex);
12646 12664 } else {
12647 12665 adapter_data.StructureLength = (uint32_t)
12648 12666 sizeof (mptsas_adapter_data_t);
12649 12667 copylen = sizeof (adapter_data.StructureLength);
12650 12668 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
12651 12669 }
12652 12670 if (ddi_copyout((void *)(&adapter_data), (void *)data,
12653 12671 copylen, mode) != 0) {
12654 12672 status = EFAULT;
12655 12673 }
12656 12674 break;
12657 12675 case MPTIOCTL_GET_PCI_INFO:
12658 12676 /*
12659 12677 * The user has requested to read pci info. Call
12660 12678 * our routine which does this.
12661 12679 */
12662 12680 bzero(&pci_info, sizeof (mptsas_pci_info_t));
12663 12681 mutex_enter(&mpt->m_mutex);
12664 12682 mptsas_read_pci_info(mpt, &pci_info);
12665 12683 mutex_exit(&mpt->m_mutex);
12666 12684 if (ddi_copyout((void *)(&pci_info), (void *)data,
12667 12685 sizeof (mptsas_pci_info_t), mode) != 0) {
12668 12686 status = EFAULT;
12669 12687 }
12670 12688 break;
12671 12689 case MPTIOCTL_RESET_ADAPTER:
12672 12690 mutex_enter(&mpt->m_mutex);
12673 12691 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12674 12692 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
12675 12693 mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
12676 12694 "failed");
12677 12695 status = EFAULT;
12678 12696 }
12679 12697 mutex_exit(&mpt->m_mutex);
12680 12698 break;
12681 12699 case MPTIOCTL_DIAG_ACTION:
12682 12700 /*
12683 12701 * The user has done a diag buffer action. Call our
12684 12702 * routine which does this. Only allow one diag action
12685 12703 * at one time.
12686 12704 */
12687 12705 mutex_enter(&mpt->m_mutex);
12688 12706 if (mpt->m_diag_action_in_progress) {
12689 12707 mutex_exit(&mpt->m_mutex);
12690 12708 return (EBUSY);
12691 12709 }
12692 12710 mpt->m_diag_action_in_progress = 1;
12693 12711 status = mptsas_diag_action(mpt,
12694 12712 (mptsas_diag_action_t *)data, mode);
12695 12713 mpt->m_diag_action_in_progress = 0;
12696 12714 mutex_exit(&mpt->m_mutex);
12697 12715 break;
12698 12716 case MPTIOCTL_EVENT_QUERY:
12699 12717 /*
12700 12718 * The user has done an event query. Call our routine
12701 12719 * which does this.
12702 12720 */
12703 12721 status = mptsas_event_query(mpt,
12704 12722 (mptsas_event_query_t *)data, mode, rval);
12705 12723 break;
12706 12724 case MPTIOCTL_EVENT_ENABLE:
12707 12725 /*
12708 12726 * The user has done an event enable. Call our routine
12709 12727 * which does this.
12710 12728 */
12711 12729 status = mptsas_event_enable(mpt,
12712 12730 (mptsas_event_enable_t *)data, mode, rval);
12713 12731 break;
12714 12732 case MPTIOCTL_EVENT_REPORT:
12715 12733 /*
12716 12734 * The user has done an event report. Call our routine
12717 12735 * which does this.
12718 12736 */
12719 12737 status = mptsas_event_report(mpt,
12720 12738 (mptsas_event_report_t *)data, mode, rval);
12721 12739 break;
12722 12740 case MPTIOCTL_REG_ACCESS:
12723 12741 /*
12724 12742 * The user has requested register access. Call our
12725 12743 * routine which does this.
12726 12744 */
12727 12745 status = mptsas_reg_access(mpt,
12728 12746 (mptsas_reg_access_t *)data, mode);
12729 12747 break;
↓ open down ↓ |
10817 lines elided |
↑ open up ↑ |
12730 12748 default:
12731 12749 status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
12732 12750 rval);
12733 12751 break;
12734 12752 }
12735 12753
12736 12754 out:
12737 12755 return (status);
12738 12756 }
12739 12757
12758 +/* Dirty wrapper for taskq */
12759 +void
12760 +mptsas_handle_restart_ioc(void *mpt) {
12761 + mptsas_restart_ioc((mptsas_t *) mpt);
12762 +}
12763 +
/*
 * Reset and re-initialize the IOC, flushing all outstanding and queued
 * commands in the process.  Target throttles are held for the duration
 * of the reset and restored afterwards, and m_in_reset is raised so the
 * I/O path marks new commands for retry until the chip is usable again.
 *
 * Must be called with mpt->m_mutex held.  Returns DDI_SUCCESS or
 * DDI_FAILURE; on failure an FMA ereport is posted and the service
 * impact is reported as lost.
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int		rval = DDI_SUCCESS;
	mptsas_target_t	*ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD so no new commands are started on any
	 * target while the chip is down.
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.  first_time is FALSE: this is a reset of
	 * an already-attached instance, not the attach-time bring-up.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles back to MAX so I/O can flow again.
	 */
	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
	}

	/* Complete the flushed commands and restart queued work. */
	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
12818 12842
/*
 * (Re)initialize the IOC hardware into an operational state.
 *
 * first_time distinguishes the attach-time bring-up from a later reset.
 * If the reset completed as a message-unit reset (MPTSAS_SUCCESS_MUR)
 * on an already-attached chip, the existing DMA resources are reused
 * and all the allocation steps are skipped (jump to the "mur" label).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t	cookie;
	uint32_t		i;
	int			rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	/*
	 * A message-unit reset of an already-initialized chip preserves
	 * the DMA setup, so skip straight to re-initializing the IOC.
	 */
	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
		goto fail;
	}
	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_sense_bufs(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_sense_bufs failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.  Each entry is the low 32 bits of a reply frame's
	 * DMA address; successive frames are m_reply_frame_size apart.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr & 0xffffffffu;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's
	 * (presumably the all-ones pattern marks a descriptor slot as
	 * unused — confirm against the MPI2 spec).
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		mptsas_log(mpt, CE_WARN,
		    "mptsas_ioc_enable_event_notification failed");
		goto fail;
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	/* Check every DMA handle touched above for FMA-detected faults. */
	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
13006 13030
13007 13031 static int
13008 13032 mptsas_get_pci_cap(mptsas_t *mpt)
13009 13033 {
13010 13034 ushort_t caps_ptr, cap, cap_count;
13011 13035
13012 13036 if (mpt->m_config_handle == NULL)
13013 13037 return (FALSE);
13014 13038 /*
13015 13039 * Check if capabilities list is supported and if so,
13016 13040 * get initial capabilities pointer and clear bits 0,1.
13017 13041 */
13018 13042 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
13019 13043 & PCI_STAT_CAP) {
13020 13044 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13021 13045 PCI_CONF_CAP_PTR), 4);
13022 13046 } else {
13023 13047 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
13024 13048 }
13025 13049
13026 13050 /*
13027 13051 * Walk capabilities if supported.
13028 13052 */
13029 13053 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
13030 13054
13031 13055 /*
13032 13056 * Check that we haven't exceeded the maximum number of
13033 13057 * capabilities and that the pointer is in a valid range.
13034 13058 */
13035 13059 if (++cap_count > 48) {
13036 13060 mptsas_log(mpt, CE_WARN,
13037 13061 "too many device capabilities.\n");
13038 13062 break;
13039 13063 }
13040 13064 if (caps_ptr < 64) {
13041 13065 mptsas_log(mpt, CE_WARN,
13042 13066 "capabilities pointer 0x%x out of range.\n",
13043 13067 caps_ptr);
13044 13068 break;
13045 13069 }
13046 13070
13047 13071 /*
13048 13072 * Get next capability and check that it is valid.
13049 13073 * For now, we only support power management.
13050 13074 */
13051 13075 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
13052 13076 switch (cap) {
13053 13077 case PCI_CAP_ID_PM:
13054 13078 mptsas_log(mpt, CE_NOTE,
13055 13079 "?mptsas%d supports power management.\n",
13056 13080 mpt->m_instance);
13057 13081 mpt->m_options |= MPTSAS_OPT_PM;
13058 13082
13059 13083 /* Save PMCSR offset */
13060 13084 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
13061 13085 break;
13062 13086 /*
13063 13087 * The following capabilities are valid. Any others
13064 13088 * will cause a message to be logged.
13065 13089 */
13066 13090 case PCI_CAP_ID_VPD:
13067 13091 case PCI_CAP_ID_MSI:
13068 13092 case PCI_CAP_ID_PCIX:
13069 13093 case PCI_CAP_ID_PCI_E:
13070 13094 case PCI_CAP_ID_MSI_X:
13071 13095 break;
13072 13096 default:
13073 13097 mptsas_log(mpt, CE_NOTE,
13074 13098 "?mptsas%d unrecognized capability "
13075 13099 "0x%x.\n", mpt->m_instance, cap);
13076 13100 break;
13077 13101 }
13078 13102
13079 13103 /*
13080 13104 * Get next capabilities pointer and clear bits 0,1.
13081 13105 */
13082 13106 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13083 13107 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
13084 13108 }
13085 13109 return (TRUE);
13086 13110 }
13087 13111
13088 13112 static int
13089 13113 mptsas_init_pm(mptsas_t *mpt)
13090 13114 {
13091 13115 char pmc_name[16];
13092 13116 char *pmc[] = {
13093 13117 NULL,
13094 13118 "0=Off (PCI D3 State)",
13095 13119 "3=On (PCI D0 State)",
13096 13120 NULL
13097 13121 };
13098 13122 uint16_t pmcsr_stat;
13099 13123
13100 13124 if (mptsas_get_pci_cap(mpt) == FALSE) {
13101 13125 return (DDI_FAILURE);
13102 13126 }
13103 13127 /*
13104 13128 * If PCI's capability does not support PM, then don't need
13105 13129 * to registe the pm-components
13106 13130 */
13107 13131 if (!(mpt->m_options & MPTSAS_OPT_PM))
13108 13132 return (DDI_SUCCESS);
13109 13133 /*
13110 13134 * If power management is supported by this chip, create
13111 13135 * pm-components property for the power management framework
13112 13136 */
13113 13137 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
13114 13138 pmc[0] = pmc_name;
13115 13139 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
13116 13140 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
13117 13141 mpt->m_options &= ~MPTSAS_OPT_PM;
13118 13142 mptsas_log(mpt, CE_WARN,
13119 13143 "mptsas%d: pm-component property creation failed.",
13120 13144 mpt->m_instance);
13121 13145 return (DDI_FAILURE);
13122 13146 }
13123 13147
13124 13148 /*
13125 13149 * Power on device.
13126 13150 */
13127 13151 (void) pm_busy_component(mpt->m_dip, 0);
13128 13152 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
13129 13153 mpt->m_pmcsr_offset);
13130 13154 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
13131 13155 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
13132 13156 mpt->m_instance);
13133 13157 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
13134 13158 PCI_PMCSR_D0);
13135 13159 }
13136 13160 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
13137 13161 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
13138 13162 return (DDI_FAILURE);
13139 13163 }
13140 13164 mpt->m_power_level = PM_LEVEL_D0;
13141 13165 /*
13142 13166 * Set pm idle delay.
13143 13167 */
13144 13168 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
13145 13169 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
13146 13170
13147 13171 return (DDI_SUCCESS);
13148 13172 }
13149 13173
13150 13174 static int
13151 13175 mptsas_register_intrs(mptsas_t *mpt)
13152 13176 {
13153 13177 dev_info_t *dip;
13154 13178 int intr_types;
13155 13179
13156 13180 dip = mpt->m_dip;
13157 13181
13158 13182 /* Get supported interrupt types */
13159 13183 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
13160 13184 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
13161 13185 "failed\n");
13162 13186 return (FALSE);
13163 13187 }
13164 13188
13165 13189 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
13166 13190
13167 13191 /*
13168 13192 * Try MSI, but fall back to FIXED
13169 13193 */
13170 13194 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
13171 13195 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
13172 13196 NDBG0(("Using MSI interrupt type"));
13173 13197 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
13174 13198 return (TRUE);
13175 13199 }
13176 13200 }
13177 13201 if (intr_types & DDI_INTR_TYPE_FIXED) {
13178 13202 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
13179 13203 NDBG0(("Using FIXED interrupt type"));
13180 13204 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
13181 13205 return (TRUE);
13182 13206 } else {
13183 13207 NDBG0(("FIXED interrupt registration failed"));
13184 13208 return (FALSE);
13185 13209 }
13186 13210 }
13187 13211
13188 13212 return (FALSE);
13189 13213 }
13190 13214
/*
 * Counterpart of mptsas_register_intrs(): tear down whatever interrupt
 * handlers and handles mptsas_add_intrs() installed.
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
13196 13220
13197 13221 /*
13198 13222 * mptsas_add_intrs:
13199 13223 *
13200 13224 * Register FIXED or MSI interrupts.
13201 13225 */
13202 13226 static int
13203 13227 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
13204 13228 {
13205 13229 dev_info_t *dip = mpt->m_dip;
13206 13230 int avail, actual, count = 0;
13207 13231 int i, flag, ret;
13208 13232
13209 13233 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
13210 13234
13211 13235 /* Get number of interrupts */
13212 13236 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
13213 13237 if ((ret != DDI_SUCCESS) || (count <= 0)) {
13214 13238 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
13215 13239 "ret %d count %d\n", ret, count);
13216 13240
13217 13241 return (DDI_FAILURE);
13218 13242 }
13219 13243
13220 13244 /* Get number of available interrupts */
13221 13245 ret = ddi_intr_get_navail(dip, intr_type, &avail);
13222 13246 if ((ret != DDI_SUCCESS) || (avail == 0)) {
13223 13247 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
13224 13248 "ret %d avail %d\n", ret, avail);
13225 13249
13226 13250 return (DDI_FAILURE);
13227 13251 }
13228 13252
13229 13253 if (avail < count) {
13230 13254 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
13231 13255 "navail() returned %d", count, avail);
13232 13256 }
13233 13257
13234 13258 /* Mpt only have one interrupt routine */
13235 13259 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
13236 13260 count = 1;
13237 13261 }
13238 13262
13239 13263 /* Allocate an array of interrupt handles */
13240 13264 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
13241 13265 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
13242 13266
13243 13267 flag = DDI_INTR_ALLOC_NORMAL;
13244 13268
13245 13269 /* call ddi_intr_alloc() */
13246 13270 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
13247 13271 count, &actual, flag);
13248 13272
13249 13273 if ((ret != DDI_SUCCESS) || (actual == 0)) {
13250 13274 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
13251 13275 ret);
13252 13276 kmem_free(mpt->m_htable, mpt->m_intr_size);
13253 13277 return (DDI_FAILURE);
13254 13278 }
13255 13279
13256 13280 /* use interrupt count returned or abort? */
13257 13281 if (actual < count) {
13258 13282 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
13259 13283 count, actual);
13260 13284 }
13261 13285
13262 13286 mpt->m_intr_cnt = actual;
13263 13287
13264 13288 /*
13265 13289 * Get priority for first msi, assume remaining are all the same
13266 13290 */
13267 13291 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
13268 13292 &mpt->m_intr_pri)) != DDI_SUCCESS) {
13269 13293 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
13270 13294
13271 13295 /* Free already allocated intr */
13272 13296 for (i = 0; i < actual; i++) {
13273 13297 (void) ddi_intr_free(mpt->m_htable[i]);
13274 13298 }
13275 13299
13276 13300 kmem_free(mpt->m_htable, mpt->m_intr_size);
13277 13301 return (DDI_FAILURE);
13278 13302 }
13279 13303
13280 13304 /* Test for high level mutex */
13281 13305 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
13282 13306 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
13283 13307 "Hi level interrupt not supported\n");
13284 13308
13285 13309 /* Free already allocated intr */
13286 13310 for (i = 0; i < actual; i++) {
13287 13311 (void) ddi_intr_free(mpt->m_htable[i]);
13288 13312 }
13289 13313
13290 13314 kmem_free(mpt->m_htable, mpt->m_intr_size);
13291 13315 return (DDI_FAILURE);
13292 13316 }
13293 13317
13294 13318 /* Call ddi_intr_add_handler() */
13295 13319 for (i = 0; i < actual; i++) {
13296 13320 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
13297 13321 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
13298 13322 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
13299 13323 "failed %d\n", ret);
13300 13324
13301 13325 /* Free already allocated intr */
13302 13326 for (i = 0; i < actual; i++) {
13303 13327 (void) ddi_intr_free(mpt->m_htable[i]);
13304 13328 }
13305 13329
13306 13330 kmem_free(mpt->m_htable, mpt->m_intr_size);
13307 13331 return (DDI_FAILURE);
13308 13332 }
13309 13333 }
13310 13334
13311 13335 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
13312 13336 != DDI_SUCCESS) {
13313 13337 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
13314 13338
13315 13339 /* Free already allocated intr */
13316 13340 for (i = 0; i < actual; i++) {
13317 13341 (void) ddi_intr_free(mpt->m_htable[i]);
13318 13342 }
13319 13343
13320 13344 kmem_free(mpt->m_htable, mpt->m_intr_size);
13321 13345 return (DDI_FAILURE);
13322 13346 }
13323 13347
13324 13348 /*
13325 13349 * Enable interrupts
13326 13350 */
13327 13351 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13328 13352 /* Call ddi_intr_block_enable() for MSI interrupts */
13329 13353 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
13330 13354 } else {
13331 13355 /* Call ddi_intr_enable for MSI or FIXED interrupts */
13332 13356 for (i = 0; i < mpt->m_intr_cnt; i++) {
13333 13357 (void) ddi_intr_enable(mpt->m_htable[i]);
13334 13358 }
13335 13359 }
13336 13360 return (DDI_SUCCESS);
13337 13361 }
13338 13362
13339 13363 /*
13340 13364 * mptsas_rem_intrs:
13341 13365 *
13342 13366 * Unregister FIXED or MSI interrupts
13343 13367 */
13344 13368 static void
13345 13369 mptsas_rem_intrs(mptsas_t *mpt)
13346 13370 {
13347 13371 int i;
13348 13372
13349 13373 NDBG6(("mptsas_rem_intrs"));
13350 13374
13351 13375 /* Disable all interrupts */
13352 13376 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13353 13377 /* Call ddi_intr_block_disable() */
13354 13378 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
13355 13379 } else {
13356 13380 for (i = 0; i < mpt->m_intr_cnt; i++) {
13357 13381 (void) ddi_intr_disable(mpt->m_htable[i]);
13358 13382 }
13359 13383 }
13360 13384
13361 13385 /* Call ddi_intr_remove_handler() */
13362 13386 for (i = 0; i < mpt->m_intr_cnt; i++) {
13363 13387 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
13364 13388 (void) ddi_intr_free(mpt->m_htable[i]);
13365 13389 }
13366 13390
13367 13391 kmem_free(mpt->m_htable, mpt->m_intr_size);
13368 13392 }
13369 13393
/*
 * The IO fault service error handling callback function.
 * Posts the PCI ereport for the fault and reports the framework's
 * status back to the fault service unchanged.
 */
/*ARGSUSED*/
static int
mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
13384 13408
/*
 * mptsas_fm_init - initialize fma capabilities and register with IO
 * fault services.  Does nothing when m_fm_capabilities is zero.
 */
static void
mptsas_fm_init(mptsas_t *mpt)
{
	/*
	 * Need to change iblock to priority for new MSI intr
	 */
	ddi_iblock_cookie_t	fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (mpt->m_fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 * mpt->m_fm_capabilities will be updated to indicate
		 * capabilities actually supported (not requested.)
		 */
		ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */
		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			pci_ereport_setup(mpt->m_dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			ddi_fm_handler_register(mpt->m_dip,
			    mptsas_fm_error_cb, (void *) mpt);
		}
	}
}
13429 13453
/*
 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
 * fault services.  Mirrors mptsas_fm_init() in reverse order, then
 * restores the default (non-FMA) access/DMA attributes.
 */
static void
mptsas_fm_fini(mptsas_t *mpt)
{
	/* Only unregister FMA capabilities if registered */
	if (mpt->m_fm_capabilities) {

		/*
		 * Un-register error callback if error callback capable.
		 */

		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			ddi_fm_handler_unregister(mpt->m_dip);
		}

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */

		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
			pci_ereport_teardown(mpt->m_dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(mpt->m_dip);

		/* Adjust access and dma attributes for FMA */
		mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;

	}
}
13468 13492
13469 13493 int
13470 13494 mptsas_check_acc_handle(ddi_acc_handle_t handle)
13471 13495 {
13472 13496 ddi_fm_error_t de;
13473 13497
13474 13498 if (handle == NULL)
13475 13499 return (DDI_FAILURE);
13476 13500 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
13477 13501 return (de.fme_status);
13478 13502 }
13479 13503
13480 13504 int
13481 13505 mptsas_check_dma_handle(ddi_dma_handle_t handle)
13482 13506 {
13483 13507 ddi_fm_error_t de;
13484 13508
13485 13509 if (handle == NULL)
13486 13510 return (DDI_FAILURE);
13487 13511 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
13488 13512 return (de.fme_status);
13489 13513 }
13490 13514
13491 13515 void
13492 13516 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
13493 13517 {
13494 13518 uint64_t ena;
13495 13519 char buf[FM_MAX_CLASS];
13496 13520
13497 13521 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
13498 13522 ena = fm_ena_generate(0, FM_ENA_FMT1);
13499 13523 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
13500 13524 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
13501 13525 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
13502 13526 }
13503 13527 }
13504 13528
13505 13529 static int
13506 13530 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13507 13531 uint16_t *dev_handle, mptsas_target_t **pptgt)
13508 13532 {
13509 13533 int rval;
13510 13534 uint32_t dev_info;
13511 13535 uint64_t sas_wwn;
13512 13536 mptsas_phymask_t phymask;
13513 13537 uint8_t physport, phynum, config, disk;
13514 13538 uint64_t devicename;
13515 13539 uint16_t pdev_hdl;
13516 13540 mptsas_target_t *tmp_tgt = NULL;
13517 13541 uint16_t bay_num, enclosure, io_flags;
13518 13542
13519 13543 ASSERT(*pptgt == NULL);
13520 13544
13521 13545 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13522 13546 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13523 13547 &bay_num, &enclosure, &io_flags);
13524 13548 if (rval != DDI_SUCCESS) {
13525 13549 rval = DEV_INFO_FAIL_PAGE0;
13526 13550 return (rval);
13527 13551 }
13528 13552
13529 13553 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
13530 13554 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13531 13555 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
13532 13556 rval = DEV_INFO_WRONG_DEVICE_TYPE;
13533 13557 return (rval);
13534 13558 }
13535 13559
13536 13560 /*
13537 13561 * Check if the dev handle is for a Phys Disk. If so, set return value
13538 13562 * and exit. Don't add Phys Disks to hash.
13539 13563 */
13540 13564 for (config = 0; config < mpt->m_num_raid_configs; config++) {
13541 13565 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
13542 13566 if (*dev_handle == mpt->m_raidconfig[config].
13543 13567 m_physdisk_devhdl[disk]) {
13544 13568 rval = DEV_INFO_PHYS_DISK;
13545 13569 return (rval);
13546 13570 }
13547 13571 }
13548 13572 }
13549 13573
13550 13574 /*
13551 13575 * Get SATA Device Name from SAS device page0 for
13552 13576 * sata device, if device name doesn't exist, set mta_wwn to
13553 13577 * 0 for direct attached SATA. For the device behind the expander
13554 13578 * we still can use STP address assigned by expander.
13555 13579 */
13556 13580 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13557 13581 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13558 13582 mutex_exit(&mpt->m_mutex);
13559 13583 /* alloc a tmp_tgt to send the cmd */
13560 13584 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
13561 13585 KM_SLEEP);
13562 13586 tmp_tgt->m_devhdl = *dev_handle;
13563 13587 tmp_tgt->m_deviceinfo = dev_info;
13564 13588 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
13565 13589 tmp_tgt->m_qfull_retry_interval =
13566 13590 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
13567 13591 tmp_tgt->m_t_throttle = MAX_THROTTLE;
13568 13592 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13569 13593 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
13570 13594 mutex_enter(&mpt->m_mutex);
13571 13595 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13572 13596 sas_wwn = devicename;
13573 13597 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13574 13598 sas_wwn = 0;
13575 13599 }
13576 13600 }
13577 13601
13578 13602 phymask = mptsas_physport_to_phymask(mpt, physport);
13579 13603 *pptgt = mptsas_tgt_alloc(mpt, *dev_handle, sas_wwn,
13580 13604 dev_info, phymask, phynum);
13581 13605 if (*pptgt == NULL) {
13582 13606 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
13583 13607 "structure!");
13584 13608 rval = DEV_INFO_FAIL_ALLOC;
13585 13609 return (rval);
13586 13610 }
13587 13611 (*pptgt)->m_io_flags = io_flags;
13588 13612 (*pptgt)->m_enclosure = enclosure;
13589 13613 (*pptgt)->m_slot_num = bay_num;
13590 13614 return (DEV_INFO_SUCCESS);
13591 13615 }
13592 13616
13593 13617 uint64_t
13594 13618 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
13595 13619 {
13596 13620 uint64_t sata_guid = 0, *pwwn = NULL;
13597 13621 int target = ptgt->m_devhdl;
13598 13622 uchar_t *inq83 = NULL;
13599 13623 int inq83_len = 0xFF;
13600 13624 uchar_t *dblk = NULL;
13601 13625 int inq83_retry = 3;
13602 13626 int rval = DDI_FAILURE;
13603 13627
13604 13628 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
13605 13629
13606 13630 inq83_retry:
13607 13631 rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
13608 13632 inq83_len, NULL, 1);
13609 13633 if (rval != DDI_SUCCESS) {
13610 13634 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
13611 13635 "0x83 for target:%x, lun:%x failed!", target, lun);
13612 13636 goto out;
13613 13637 }
13614 13638 /* According to SAT2, the first descriptor is logic unit name */
13615 13639 dblk = &inq83[4];
13616 13640 if ((dblk[1] & 0x30) != 0) {
13617 13641 mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
13618 13642 goto out;
13619 13643 }
13620 13644 pwwn = (uint64_t *)(void *)(&dblk[4]);
13621 13645 if ((dblk[4] & 0xf0) == 0x50) {
13622 13646 sata_guid = BE_64(*pwwn);
13623 13647 goto out;
13624 13648 } else if (dblk[4] == 'A') {
13625 13649 NDBG20(("SATA drive has no NAA format GUID."));
13626 13650 goto out;
13627 13651 } else {
13628 13652 /* The data is not ready, wait and retry */
13629 13653 inq83_retry--;
13630 13654 if (inq83_retry <= 0) {
13631 13655 goto out;
13632 13656 }
13633 13657 NDBG20(("The GUID is not ready, retry..."));
13634 13658 delay(1 * drv_usectohz(1000000));
13635 13659 goto inq83_retry;
13636 13660 }
13637 13661 out:
13638 13662 kmem_free(inq83, inq83_len);
13639 13663 return (sata_guid);
13640 13664 }
13641 13665
13642 13666 static int
13643 13667 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
13644 13668 unsigned char *buf, int len, int *reallen, uchar_t evpd)
13645 13669 {
13646 13670 uchar_t cdb[CDB_GROUP0];
13647 13671 struct scsi_address ap;
13648 13672 struct buf *data_bp = NULL;
13649 13673 int resid = 0;
13650 13674 int ret = DDI_FAILURE;
13651 13675
13652 13676 ASSERT(len <= 0xffff);
13653 13677
13654 13678 ap.a_target = MPTSAS_INVALID_DEVHDL;
13655 13679 ap.a_lun = (uchar_t)(lun);
13656 13680 ap.a_hba_tran = mpt->m_tran;
13657 13681
13658 13682 data_bp = scsi_alloc_consistent_buf(&ap,
13659 13683 (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
13660 13684 if (data_bp == NULL) {
13661 13685 return (ret);
13662 13686 }
13663 13687 bzero(cdb, CDB_GROUP0);
13664 13688 cdb[0] = SCMD_INQUIRY;
13665 13689 cdb[1] = evpd;
13666 13690 cdb[2] = page;
13667 13691 cdb[3] = (len & 0xff00) >> 8;
13668 13692 cdb[4] = (len & 0x00ff);
13669 13693 cdb[5] = 0;
13670 13694
13671 13695 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
13672 13696 &resid);
13673 13697 if (ret == DDI_SUCCESS) {
13674 13698 if (reallen) {
13675 13699 *reallen = len - resid;
13676 13700 }
13677 13701 bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
13678 13702 }
13679 13703 if (data_bp) {
13680 13704 scsi_free_consistent_buf(data_bp);
13681 13705 }
13682 13706 return (ret);
13683 13707 }
13684 13708
/*
 * Synchronously issue a pre-built CDB to a target and poll for completion.
 *
 * A clone of the HBA tran structure carries a private tgt_private (lun and
 * target pointer) down through scsi_init_pkt(), simulating a command
 * issued by a target driver such as sd.  Returns DDI_SUCCESS when the
 * packet completes without a check condition; on success *resid (if
 * non-NULL) receives the data residual.  NOTE(review): 'ap->a_hba_tran'
 * is left pointing at the freed clone on return — callers appear to treat
 * 'ap' as scratch; confirm before reusing it.
 */
static int
mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
    mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
    int *resid)
{
	struct scsi_pkt *pktp = NULL;
	scsi_hba_tran_t *tran_clone = NULL;
	mptsas_tgt_private_t *tgt_private = NULL;
	int ret = DDI_FAILURE;

	/*
	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
	 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
	 * to simulate the cmds from sd
	 */
	tran_clone = kmem_alloc(
	    sizeof (scsi_hba_tran_t), KM_SLEEP);
	if (tran_clone == NULL) {
		goto out;
	}
	bcopy((caddr_t)mpt->m_tran,
	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
	tgt_private = kmem_alloc(
	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
	if (tgt_private == NULL) {
		goto out;
	}
	tgt_private->t_lun = ap->a_lun;
	tgt_private->t_private = ptgt;
	tran_clone->tran_tgt_private = tgt_private;
	ap->a_hba_tran = tran_clone;

	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
	    data_bp, cdblen, sizeof (struct scsi_arq_status),
	    0, PKT_CONSISTENT, NULL, NULL);
	if (pktp == NULL) {
		goto out;
	}
	bcopy(cdb, pktp->pkt_cdbp, cdblen);
	/* No disconnect/parity games: simple polled transfer. */
	pktp->pkt_flags = FLAG_NOPARITY;
	if (scsi_poll(pktp) < 0) {
		goto out;
	}
	/* A check condition counts as failure for these internal commands. */
	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
		goto out;
	}
	if (resid != NULL) {
		*resid = pktp->pkt_resid;
	}

	ret = DDI_SUCCESS;
out:
	/* Unified cleanup: each resource freed only if it was acquired. */
	if (pktp) {
		scsi_destroy_pkt(pktp);
	}
	if (tran_clone) {
		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
	}
	if (tgt_private) {
		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
	}
	return (ret);
}
13748 13772 static int
13749 13773 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
13750 13774 {
13751 13775 char *cp = NULL;
13752 13776 char *ptr = NULL;
13753 13777 size_t s = 0;
13754 13778 char *wwid_str = NULL;
13755 13779 char *lun_str = NULL;
13756 13780 long lunnum;
13757 13781 long phyid = -1;
13758 13782 int rc = DDI_FAILURE;
13759 13783
13760 13784 ptr = name;
13761 13785 ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
13762 13786 ptr++;
13763 13787 if ((cp = strchr(ptr, ',')) == NULL) {
13764 13788 return (DDI_FAILURE);
13765 13789 }
13766 13790
13767 13791 wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13768 13792 s = (uintptr_t)cp - (uintptr_t)ptr;
13769 13793
13770 13794 bcopy(ptr, wwid_str, s);
13771 13795 wwid_str[s] = '\0';
13772 13796
13773 13797 ptr = ++cp;
13774 13798
13775 13799 if ((cp = strchr(ptr, '\0')) == NULL) {
13776 13800 goto out;
13777 13801 }
13778 13802 lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13779 13803 s = (uintptr_t)cp - (uintptr_t)ptr;
13780 13804
13781 13805 bcopy(ptr, lun_str, s);
13782 13806 lun_str[s] = '\0';
13783 13807
13784 13808 if (name[0] == 'p') {
13785 13809 rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
13786 13810 } else {
13787 13811 rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
13788 13812 }
13789 13813 if (rc != DDI_SUCCESS)
13790 13814 goto out;
13791 13815
13792 13816 if (phyid != -1) {
13793 13817 ASSERT(phyid < MPTSAS_MAX_PHYS);
13794 13818 *phy = (uint8_t)phyid;
13795 13819 }
13796 13820 rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
13797 13821 if (rc != 0)
13798 13822 goto out;
13799 13823
13800 13824 *lun = (int)lunnum;
13801 13825 rc = DDI_SUCCESS;
13802 13826 out:
13803 13827 if (wwid_str)
13804 13828 kmem_free(wwid_str, SCSI_MAXNAMELEN);
13805 13829 if (lun_str)
13806 13830 kmem_free(lun_str, SCSI_MAXNAMELEN);
13807 13831
13808 13832 return (rc);
13809 13833 }
13810 13834
13811 13835 /*
13812 13836 * mptsas_parse_smp_name() is to parse sas wwn string
13813 13837 * which format is "wWWN"
13814 13838 */
13815 13839 static int
13816 13840 mptsas_parse_smp_name(char *name, uint64_t *wwn)
13817 13841 {
13818 13842 char *ptr = name;
13819 13843
13820 13844 if (*ptr != 'w') {
13821 13845 return (DDI_FAILURE);
13822 13846 }
13823 13847
13824 13848 ptr++;
13825 13849 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
13826 13850 return (DDI_FAILURE);
13827 13851 }
13828 13852 return (DDI_SUCCESS);
13829 13853 }
13830 13854
/*
 * bus_config(9E) entry point for an iport.
 *
 * BUS_CONFIG_ONE configures a single child named by 'arg' ("smp@wWWN",
 * "name@wWWID,LUN" or "name@pPHY,LUN"); BUS_CONFIG_DRIVER and
 * BUS_CONFIG_ALL enumerate everything visible through the iport.  The
 * vHCI and iport nexus nodes are held across the whole operation.
 * Returns an NDI_* status.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int ret = NDI_FAILURE;
	int circ = 0;
	int circ1 = 0;
	mptsas_t *mpt;
	char *ptr = NULL;
	char *devnm = NULL;
	uint64_t wwid = 0;
	uint8_t phy = 0xFF;
	int lun = 0;
	uint_t mflags = flag;
	int bconfig = TRUE;

	/* Only iport nodes (with a unit address) are configurable here. */
	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	/* Let the generic framework finish the job (unless skipped above). */
	if ((ret == NDI_SUCCESS) && bconfig) {
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
13948 13972
13949 13973 static int
13950 13974 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
13951 13975 mptsas_target_t *ptgt)
13952 13976 {
13953 13977 int rval = DDI_FAILURE;
13954 13978 struct scsi_inquiry *sd_inq = NULL;
13955 13979 mptsas_t *mpt = DIP2MPT(pdip);
13956 13980
13957 13981 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13958 13982
13959 13983 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
13960 13984 SUN_INQSIZE, 0, (uchar_t)0);
13961 13985
13962 13986 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13963 13987 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
13964 13988 } else {
13965 13989 rval = DDI_FAILURE;
13966 13990 }
13967 13991
13968 13992 kmem_free(sd_inq, SUN_INQSIZE);
13969 13993 return (rval);
13970 13994 }
13971 13995
/*
 * Configure (or locate) the child node for LUN 'lun' of the target with
 * SAS address 'sasaddr' under iport 'pdip'.
 *
 * Returns DDI_SUCCESS with *lundip set when the node already exists or
 * was created; DDI_FAILURE when no matching target is known or probing
 * fails.  A phymask of 0 denotes the virtual iport, where the handle
 * refers to an IR (RAID) volume instead of a physical lun.
 */
static int
mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
    dev_info_t **lundip)
{
	int rval;
	mptsas_t *mpt = DIP2MPT(pdip);
	int phymask;
	mptsas_target_t *ptgt = NULL;

	/*
	 * Get the physical port associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
	if (ptgt == NULL) {
		/*
		 * didn't match any device by searching
		 */
		return (DDI_FAILURE);
	}
	/*
	 * If the LUN already exists and the status is online,
	 * we just return the pointer to dev_info_t directly.
	 * For the mdi_pathinfo node, we'll handle it in
	 * mptsas_create_virt_lun()
	 * TODO should be also in mptsas_handle_dr
	 */

	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
	if (*lundip != NULL) {
		/*
		 * TODO Another senario is, we hotplug the same disk
		 * on the same slot, the devhdl changed, is this
		 * possible?
		 * tgt_private->t_private != ptgt
		 */
		/*
		 * NOTE(review): ptgt was found above by matching sasaddr,
		 * so this inequality looks unreachable as written; it
		 * presumably guards the enclosure-mapping "same slot, new
		 * drive" case described below -- confirm before relying
		 * on it.
		 */
		if (sasaddr != ptgt->m_addr.mta_wwn) {
			/*
			 * The device has changed although the devhdl is the
			 * same (Enclosure mapping mode, change drive on the
			 * same slot)
			 */
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	}

	if (phymask == 0) {
		/*
		 * Configure IR volume
		 */
		rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
		return (rval);
	}
	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);

	return (rval);
}
14032 14056
14033 14057 static int
14034 14058 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
14035 14059 dev_info_t **lundip)
14036 14060 {
14037 14061 int rval;
14038 14062 mptsas_t *mpt = DIP2MPT(pdip);
14039 14063 mptsas_phymask_t phymask;
14040 14064 mptsas_target_t *ptgt = NULL;
14041 14065
14042 14066 /*
14043 14067 * Get the physical port associated to the iport
14044 14068 */
14045 14069 phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14046 14070 "phymask", 0);
14047 14071
14048 14072 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
14049 14073 if (ptgt == NULL) {
14050 14074 /*
14051 14075 * didn't match any device by searching
14052 14076 */
14053 14077 return (DDI_FAILURE);
14054 14078 }
14055 14079
14056 14080 /*
14057 14081 * If the LUN already exists and the status is online,
14058 14082 * we just return the pointer to dev_info_t directly.
14059 14083 * For the mdi_pathinfo node, we'll handle it in
14060 14084 * mptsas_create_virt_lun().
14061 14085 */
14062 14086
14063 14087 *lundip = mptsas_find_child_phy(pdip, phy);
14064 14088 if (*lundip != NULL) {
14065 14089 return (DDI_SUCCESS);
14066 14090 }
14067 14091
14068 14092 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
14069 14093
14070 14094 return (rval);
14071 14095 }
14072 14096
14073 14097 static int
14074 14098 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
14075 14099 uint8_t *lun_addr_type)
14076 14100 {
14077 14101 uint32_t lun_idx = 0;
14078 14102
14079 14103 ASSERT(lun_num != NULL);
14080 14104 ASSERT(lun_addr_type != NULL);
14081 14105
14082 14106 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14083 14107 /* determine report luns addressing type */
14084 14108 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
14085 14109 /*
14086 14110 * Vendors in the field have been found to be concatenating
14087 14111 * bus/target/lun to equal the complete lun value instead
14088 14112 * of switching to flat space addressing
14089 14113 */
14090 14114 /* 00b - peripheral device addressing method */
14091 14115 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
14092 14116 /* FALLTHRU */
14093 14117 /* 10b - logical unit addressing method */
14094 14118 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
14095 14119 /* FALLTHRU */
14096 14120 /* 01b - flat space addressing method */
14097 14121 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
14098 14122 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
14099 14123 *lun_addr_type = (buf[lun_idx] &
14100 14124 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
14101 14125 *lun_num = (buf[lun_idx] & 0x3F) << 8;
14102 14126 *lun_num |= buf[lun_idx + 1];
14103 14127 return (DDI_SUCCESS);
14104 14128 default:
14105 14129 return (DDI_FAILURE);
14106 14130 }
14107 14131 }
14108 14132
14109 14133 static int
14110 14134 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
14111 14135 {
14112 14136 struct buf *repluns_bp = NULL;
14113 14137 struct scsi_address ap;
14114 14138 uchar_t cdb[CDB_GROUP5];
14115 14139 int ret = DDI_FAILURE;
14116 14140 int retry = 0;
14117 14141 int lun_list_len = 0;
14118 14142 uint16_t lun_num = 0;
14119 14143 uint8_t lun_addr_type = 0;
14120 14144 uint32_t lun_cnt = 0;
14121 14145 uint32_t lun_total = 0;
14122 14146 dev_info_t *cdip = NULL;
14123 14147 uint16_t *saved_repluns = NULL;
14124 14148 char *buffer = NULL;
14125 14149 int buf_len = 128;
14126 14150 mptsas_t *mpt = DIP2MPT(pdip);
14127 14151 uint64_t sas_wwn = 0;
14128 14152 uint8_t phy = 0xFF;
14129 14153 uint32_t dev_info = 0;
14130 14154
14131 14155 mutex_enter(&mpt->m_mutex);
14132 14156 sas_wwn = ptgt->m_addr.mta_wwn;
14133 14157 phy = ptgt->m_phynum;
14134 14158 dev_info = ptgt->m_deviceinfo;
14135 14159 mutex_exit(&mpt->m_mutex);
14136 14160
14137 14161 if (sas_wwn == 0) {
14138 14162 /*
14139 14163 * It's a SATA without Device Name
14140 14164 * So don't try multi-LUNs
14141 14165 */
14142 14166 if (mptsas_find_child_phy(pdip, phy)) {
14143 14167 return (DDI_SUCCESS);
14144 14168 } else {
14145 14169 /*
14146 14170 * need configure and create node
14147 14171 */
14148 14172 return (DDI_FAILURE);
14149 14173 }
14150 14174 }
14151 14175
14152 14176 /*
14153 14177 * WWN (SAS address or Device Name exist)
14154 14178 */
14155 14179 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14156 14180 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14157 14181 /*
14158 14182 * SATA device with Device Name
14159 14183 * So don't try multi-LUNs
14160 14184 */
14161 14185 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
14162 14186 return (DDI_SUCCESS);
14163 14187 } else {
14164 14188 return (DDI_FAILURE);
14165 14189 }
14166 14190 }
14167 14191
14168 14192 do {
14169 14193 ap.a_target = MPTSAS_INVALID_DEVHDL;
14170 14194 ap.a_lun = 0;
14171 14195 ap.a_hba_tran = mpt->m_tran;
14172 14196 repluns_bp = scsi_alloc_consistent_buf(&ap,
14173 14197 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
14174 14198 if (repluns_bp == NULL) {
14175 14199 retry++;
14176 14200 continue;
14177 14201 }
14178 14202 bzero(cdb, CDB_GROUP5);
14179 14203 cdb[0] = SCMD_REPORT_LUNS;
14180 14204 cdb[6] = (buf_len & 0xff000000) >> 24;
14181 14205 cdb[7] = (buf_len & 0x00ff0000) >> 16;
14182 14206 cdb[8] = (buf_len & 0x0000ff00) >> 8;
14183 14207 cdb[9] = (buf_len & 0x000000ff);
14184 14208
14185 14209 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
14186 14210 repluns_bp, NULL);
14187 14211 if (ret != DDI_SUCCESS) {
14188 14212 scsi_free_consistent_buf(repluns_bp);
14189 14213 retry++;
14190 14214 continue;
14191 14215 }
14192 14216 lun_list_len = BE_32(*(int *)((void *)(
14193 14217 repluns_bp->b_un.b_addr)));
14194 14218 if (buf_len >= lun_list_len + 8) {
14195 14219 ret = DDI_SUCCESS;
14196 14220 break;
14197 14221 }
14198 14222 scsi_free_consistent_buf(repluns_bp);
14199 14223 buf_len = lun_list_len + 8;
14200 14224
14201 14225 } while (retry < 3);
14202 14226
14203 14227 if (ret != DDI_SUCCESS)
14204 14228 return (ret);
14205 14229 buffer = (char *)repluns_bp->b_un.b_addr;
14206 14230 /*
14207 14231 * find out the number of luns returned by the SCSI ReportLun call
14208 14232 * and allocate buffer space
14209 14233 */
14210 14234 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14211 14235 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
14212 14236 if (saved_repluns == NULL) {
14213 14237 scsi_free_consistent_buf(repluns_bp);
14214 14238 return (DDI_FAILURE);
14215 14239 }
14216 14240 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
14217 14241 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
14218 14242 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
14219 14243 continue;
14220 14244 }
14221 14245 saved_repluns[lun_cnt] = lun_num;
14222 14246 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
14223 14247 ret = DDI_SUCCESS;
14224 14248 else
14225 14249 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
14226 14250 ptgt);
14227 14251 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
14228 14252 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
14229 14253 MPTSAS_DEV_GONE);
14230 14254 }
14231 14255 }
14232 14256 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
14233 14257 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
14234 14258 scsi_free_consistent_buf(repluns_bp);
14235 14259 return (DDI_SUCCESS);
14236 14260 }
14237 14261
14238 14262 static int
14239 14263 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
14240 14264 {
14241 14265 int rval = DDI_FAILURE;
14242 14266 struct scsi_inquiry *sd_inq = NULL;
14243 14267 mptsas_t *mpt = DIP2MPT(pdip);
14244 14268 mptsas_target_t *ptgt = NULL;
14245 14269
14246 14270 mutex_enter(&mpt->m_mutex);
14247 14271 ptgt = refhash_linear_search(mpt->m_targets,
14248 14272 mptsas_target_eval_devhdl, &target);
14249 14273 mutex_exit(&mpt->m_mutex);
14250 14274 if (ptgt == NULL) {
14251 14275 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
14252 14276 "not found.", target);
14253 14277 return (rval);
14254 14278 }
14255 14279
14256 14280 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14257 14281 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
14258 14282 SUN_INQSIZE, 0, (uchar_t)0);
14259 14283
14260 14284 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14261 14285 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
14262 14286 0);
14263 14287 } else {
14264 14288 rval = DDI_FAILURE;
14265 14289 }
14266 14290
14267 14291 kmem_free(sd_inq, SUN_INQSIZE);
14268 14292 return (rval);
14269 14293 }
14270 14294
14271 14295 /*
14272 14296 * configure all RAID volumes for virtual iport
14273 14297 */
14274 14298 static void
14275 14299 mptsas_config_all_viport(dev_info_t *pdip)
14276 14300 {
14277 14301 mptsas_t *mpt = DIP2MPT(pdip);
14278 14302 int config, vol;
14279 14303 int target;
14280 14304 dev_info_t *lundip = NULL;
14281 14305
14282 14306 /*
14283 14307 * Get latest RAID info and search for any Volume DevHandles. If any
14284 14308 * are found, configure the volume.
14285 14309 */
14286 14310 mutex_enter(&mpt->m_mutex);
14287 14311 for (config = 0; config < mpt->m_num_raid_configs; config++) {
14288 14312 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
14289 14313 if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
14290 14314 == 1) {
14291 14315 target = mpt->m_raidconfig[config].
14292 14316 m_raidvol[vol].m_raidhandle;
14293 14317 mutex_exit(&mpt->m_mutex);
14294 14318 (void) mptsas_config_raid(pdip, target,
14295 14319 &lundip);
14296 14320 mutex_enter(&mpt->m_mutex);
14297 14321 }
14298 14322 }
14299 14323 }
14300 14324 mutex_exit(&mpt->m_mutex);
14301 14325 }
14302 14326
/*
 * Offline any child dev_info node or mdi_pathinfo node of 'pdip' that
 * belongs to target 'ptgt' (matched by WWN) but whose LUN is absent
 * from the freshly-returned REPORT LUNS list 'repluns' (lun_cnt
 * entries).  Used after re-enumeration to reap LUNs that disappeared.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t *child = NULL, *savechild = NULL;
	mdi_pathinfo_t *pip = NULL, *savepip = NULL;
	uint64_t sas_wwn, wwid;
	uint8_t phy;
	int lun;
	int i;
	int find;
	char *addr;
	char *nodename;
	mptsas_t *mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_addr.mta_wwn;
	mutex_exit(&mpt->m_mutex);

	/*
	 * Pass 1: plain dev_info children.  The sibling pointer is read
	 * before any potential offline so iteration stays safe.
	 */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		savechild = child;
		child = ddi_get_next_sibling(child);

		/* SMP children have no LUNs; skip them. */
		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		/* Only consider children belonging to this target. */
		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/*
	 * Pass 2: mdi_pathinfo (multipath) clients, same matching logic.
	 */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The lun has not been there already
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
14399 14423
/*
 * Walk the IOC's device tables and rebuild driver state: refresh RAID
 * info, then iterate SMP expanders (expander page 0 via GetNextHandle)
 * and SAS/SATA end devices (device page 0 via GetNextHandle), creating
 * entries for each device found.  The loops resume from
 * mpt->m_smp_devhdl / mpt->m_dev_handle and advance them as they go.
 * NOTE(review): the m_done_traverse_* loop flags are presumably set by
 * the page0 helpers once the handle space is exhausted — confirm in
 * their implementations; within this function the loops only exit early
 * on page-read failure.
 */
void
mptsas_update_hashtab(struct mptsas *mpt)
{
	uint32_t page_address;
	int rval = 0;
	uint16_t dev_handle;
	mptsas_target_t *ptgt = NULL;
	mptsas_smp_t smp_node;

	/*
	 * Get latest RAID info.
	 */
	(void) mptsas_get_raid_info(mpt);

	dev_handle = mpt->m_smp_devhdl;
	for (; mpt->m_done_traverse_smp == 0; ) {
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
		    != DDI_SUCCESS) {
			break;
		}
		/* Remember where we are so the walk can resume later. */
		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
		(void) mptsas_smp_alloc(mpt, &smp_node);
	}

	/*
	 * Config target devices
	 */
	dev_handle = mpt->m_dev_handle;

	/*
	 * Do loop to get sas device page 0 by GetNextHandle till the
	 * the last handle. If the sas device is a SATA/SSP target,
	 * we try to config it.
	 */
	for (; mpt->m_done_traverse_dev == 0; ) {
		ptgt = NULL;
		page_address =
		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_handle;
		rval = mptsas_get_target_device_info(mpt, page_address,
		    &dev_handle, &ptgt);
		if ((rval == DEV_INFO_FAIL_PAGE0) ||
		    (rval == DEV_INFO_FAIL_ALLOC)) {
			break;
		}

		mpt->m_dev_handle = dev_handle;
	}

}
14453 14477
14454 14478 void
14455 14479 mptsas_update_driver_data(struct mptsas *mpt)
14456 14480 {
14457 14481 mptsas_target_t *tp;
14458 14482 mptsas_smp_t *sp;
14459 14483
14460 14484 ASSERT(MUTEX_HELD(&mpt->m_mutex));
14461 14485
14462 14486 /*
14463 14487 * TODO after hard reset, update the driver data structures
14464 14488 * 1. update port/phymask mapping table mpt->m_phy_info
14465 14489 * 2. invalid all the entries in hash table
14466 14490 * m_devhdl = 0xffff and m_deviceinfo = 0
14467 14491 * 3. call sas_device_page/expander_page to update hash table
14468 14492 */
14469 14493 mptsas_update_phymask(mpt);
14470 14494
14471 14495 /*
14472 14496 * Remove all the devhdls for existing entries but leave their
14473 14497 * addresses alone. In update_hashtab() below, we'll find all
14474 14498 * targets that are still present and reassociate them with
14475 14499 * their potentially new devhdls. Leaving the targets around in
14476 14500 * this fashion allows them to be used on the tx waitq even
14477 14501 * while IOC reset is occurring.
14478 14502 */
14479 14503 for (tp = refhash_first(mpt->m_targets); tp != NULL;
14480 14504 tp = refhash_next(mpt->m_targets, tp)) {
14481 14505 tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
14482 14506 tp->m_deviceinfo = 0;
14483 14507 tp->m_dr_flag = MPTSAS_DR_INACTIVE;
14484 14508 }
14485 14509 for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
14486 14510 sp = refhash_next(mpt->m_smp_targets, sp)) {
14487 14511 sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
14488 14512 sp->m_deviceinfo = 0;
14489 14513 }
14490 14514 mpt->m_done_traverse_dev = 0;
14491 14515 mpt->m_done_traverse_smp = 0;
14492 14516 mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
14493 14517 mptsas_update_hashtab(mpt);
14494 14518 }
14495 14519
14496 14520 static void
14497 14521 mptsas_config_all(dev_info_t *pdip)
14498 14522 {
14499 14523 dev_info_t *smpdip = NULL;
14500 14524 mptsas_t *mpt = DIP2MPT(pdip);
14501 14525 int phymask = 0;
14502 14526 mptsas_phymask_t phy_mask;
14503 14527 mptsas_target_t *ptgt = NULL;
14504 14528 mptsas_smp_t *psmp;
14505 14529
14506 14530 /*
14507 14531 * Get the phymask associated to the iport
14508 14532 */
14509 14533 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14510 14534 "phymask", 0);
14511 14535
14512 14536 /*
14513 14537 * Enumerate RAID volumes here (phymask == 0).
14514 14538 */
14515 14539 if (phymask == 0) {
14516 14540 mptsas_config_all_viport(pdip);
14517 14541 return;
14518 14542 }
14519 14543
14520 14544 mutex_enter(&mpt->m_mutex);
14521 14545
14522 14546 if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
14523 14547 mptsas_update_hashtab(mpt);
14524 14548 }
14525 14549
14526 14550 for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
14527 14551 psmp = refhash_next(mpt->m_smp_targets, psmp)) {
14528 14552 phy_mask = psmp->m_addr.mta_phymask;
14529 14553 if (phy_mask == phymask) {
14530 14554 smpdip = NULL;
14531 14555 mutex_exit(&mpt->m_mutex);
14532 14556 (void) mptsas_online_smp(pdip, psmp, &smpdip);
14533 14557 mutex_enter(&mpt->m_mutex);
14534 14558 }
14535 14559 }
14536 14560
14537 14561 for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
14538 14562 ptgt = refhash_next(mpt->m_targets, ptgt)) {
14539 14563 phy_mask = ptgt->m_addr.mta_phymask;
14540 14564 if (phy_mask == phymask) {
14541 14565 mutex_exit(&mpt->m_mutex);
14542 14566 (void) mptsas_config_target(pdip, ptgt);
14543 14567 mutex_enter(&mpt->m_mutex);
14544 14568 }
14545 14569 }
14546 14570 mutex_exit(&mpt->m_mutex);
14547 14571 }
14548 14572
14549 14573 static int
14550 14574 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
14551 14575 {
14552 14576 int rval = DDI_FAILURE;
14553 14577 dev_info_t *tdip;
14554 14578
14555 14579 rval = mptsas_config_luns(pdip, ptgt);
14556 14580 if (rval != DDI_SUCCESS) {
14557 14581 /*
14558 14582 * The return value means the SCMD_REPORT_LUNS
14559 14583 * did not execute successfully. The target maybe
14560 14584 * doesn't support such command.
14561 14585 */
14562 14586 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
14563 14587 }
14564 14588 return (rval);
14565 14589 }
14566 14590
14567 14591 /*
14568 14592 * Return fail if not all the childs/paths are freed.
14569 14593 * if there is any path under the HBA, the return value will be always fail
14570 14594 * because we didn't call mdi_pi_free for path
14571 14595 */
14572 14596 static int
14573 14597 mptsas_offline_target(dev_info_t *pdip, char *name)
14574 14598 {
14575 14599 dev_info_t *child = NULL, *prechild = NULL;
14576 14600 mdi_pathinfo_t *pip = NULL, *savepip = NULL;
14577 14601 int tmp_rval, rval = DDI_SUCCESS;
14578 14602 char *addr, *cp;
14579 14603 size_t s;
14580 14604 mptsas_t *mpt = DIP2MPT(pdip);
14581 14605
14582 14606 child = ddi_get_child(pdip);
14583 14607 while (child) {
14584 14608 addr = ddi_get_name_addr(child);
14585 14609 prechild = child;
14586 14610 child = ddi_get_next_sibling(child);
14587 14611
14588 14612 if (addr == NULL) {
14589 14613 continue;
14590 14614 }
14591 14615 if ((cp = strchr(addr, ',')) == NULL) {
14592 14616 continue;
14593 14617 }
14594 14618
14595 14619 s = (uintptr_t)cp - (uintptr_t)addr;
14596 14620
14597 14621 if (strncmp(addr, name, s) != 0) {
14598 14622 continue;
14599 14623 }
14600 14624
14601 14625 tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
14602 14626 NDI_DEVI_REMOVE);
14603 14627 if (tmp_rval != DDI_SUCCESS) {
14604 14628 rval = DDI_FAILURE;
14605 14629 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14606 14630 prechild, MPTSAS_DEV_GONE) !=
14607 14631 DDI_PROP_SUCCESS) {
14608 14632 mptsas_log(mpt, CE_WARN, "mptsas driver "
14609 14633 "unable to create property for "
14610 14634 "SAS %s (MPTSAS_DEV_GONE)", addr);
14611 14635 }
14612 14636 }
14613 14637 }
14614 14638
14615 14639 pip = mdi_get_next_client_path(pdip, NULL);
14616 14640 while (pip) {
14617 14641 addr = MDI_PI(pip)->pi_addr;
14618 14642 savepip = pip;
14619 14643 pip = mdi_get_next_client_path(pdip, pip);
14620 14644 if (addr == NULL) {
14621 14645 continue;
14622 14646 }
14623 14647
14624 14648 if ((cp = strchr(addr, ',')) == NULL) {
14625 14649 continue;
14626 14650 }
14627 14651
14628 14652 s = (uintptr_t)cp - (uintptr_t)addr;
14629 14653
14630 14654 if (strncmp(addr, name, s) != 0) {
14631 14655 continue;
14632 14656 }
14633 14657
14634 14658 (void) mptsas_offline_lun(pdip, NULL, savepip,
14635 14659 NDI_DEVI_REMOVE);
14636 14660 /*
14637 14661 * driver will not invoke mdi_pi_free, so path will not
14638 14662 * be freed forever, return DDI_FAILURE.
14639 14663 */
14640 14664 rval = DDI_FAILURE;
14641 14665 }
14642 14666 return (rval);
14643 14667 }
14644 14668
14645 14669 static int
14646 14670 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
14647 14671 mdi_pathinfo_t *rpip, uint_t flags)
14648 14672 {
14649 14673 int rval = DDI_FAILURE;
14650 14674 char *devname;
14651 14675 dev_info_t *cdip, *parent;
14652 14676
14653 14677 if (rpip != NULL) {
14654 14678 parent = scsi_vhci_dip;
14655 14679 cdip = mdi_pi_get_client(rpip);
14656 14680 } else if (rdip != NULL) {
14657 14681 parent = pdip;
14658 14682 cdip = rdip;
14659 14683 } else {
14660 14684 return (DDI_FAILURE);
14661 14685 }
14662 14686
14663 14687 /*
14664 14688 * Make sure node is attached otherwise
14665 14689 * it won't have related cache nodes to
14666 14690 * clean up. i_ddi_devi_attached is
14667 14691 * similiar to i_ddi_node_state(cdip) >=
14668 14692 * DS_ATTACHED.
14669 14693 */
14670 14694 if (i_ddi_devi_attached(cdip)) {
14671 14695
14672 14696 /* Get full devname */
14673 14697 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14674 14698 (void) ddi_deviname(cdip, devname);
14675 14699 /* Clean cache */
14676 14700 (void) devfs_clean(parent, devname + 1,
14677 14701 DV_CLEAN_FORCE);
14678 14702 kmem_free(devname, MAXNAMELEN + 1);
14679 14703 }
14680 14704 if (rpip != NULL) {
14681 14705 if (MDI_PI_IS_OFFLINE(rpip)) {
14682 14706 rval = DDI_SUCCESS;
14683 14707 } else {
14684 14708 rval = mdi_pi_offline(rpip, 0);
14685 14709 }
14686 14710 } else {
14687 14711 rval = ndi_devi_offline(cdip, flags);
14688 14712 }
14689 14713
14690 14714 return (rval);
14691 14715 }
14692 14716
14693 14717 static dev_info_t *
14694 14718 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
14695 14719 {
14696 14720 dev_info_t *child = NULL;
14697 14721 char *smp_wwn = NULL;
14698 14722
14699 14723 child = ddi_get_child(parent);
14700 14724 while (child) {
14701 14725 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
14702 14726 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
14703 14727 != DDI_SUCCESS) {
14704 14728 child = ddi_get_next_sibling(child);
14705 14729 continue;
14706 14730 }
14707 14731
14708 14732 if (strcmp(smp_wwn, str_wwn) == 0) {
14709 14733 ddi_prop_free(smp_wwn);
14710 14734 break;
14711 14735 }
14712 14736 child = ddi_get_next_sibling(child);
14713 14737 ddi_prop_free(smp_wwn);
14714 14738 }
14715 14739 return (child);
14716 14740 }
14717 14741
14718 14742 static int
14719 14743 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
14720 14744 {
14721 14745 int rval = DDI_FAILURE;
14722 14746 char *devname;
14723 14747 char wwn_str[MPTSAS_WWN_STRLEN];
14724 14748 dev_info_t *cdip;
14725 14749
14726 14750 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
14727 14751
14728 14752 cdip = mptsas_find_smp_child(pdip, wwn_str);
14729 14753
14730 14754 if (cdip == NULL)
14731 14755 return (DDI_SUCCESS);
14732 14756
14733 14757 /*
14734 14758 * Make sure node is attached otherwise
14735 14759 * it won't have related cache nodes to
14736 14760 * clean up. i_ddi_devi_attached is
14737 14761 * similiar to i_ddi_node_state(cdip) >=
14738 14762 * DS_ATTACHED.
14739 14763 */
14740 14764 if (i_ddi_devi_attached(cdip)) {
14741 14765
14742 14766 /* Get full devname */
14743 14767 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14744 14768 (void) ddi_deviname(cdip, devname);
14745 14769 /* Clean cache */
14746 14770 (void) devfs_clean(pdip, devname + 1,
14747 14771 DV_CLEAN_FORCE);
14748 14772 kmem_free(devname, MAXNAMELEN + 1);
14749 14773 }
14750 14774
14751 14775 rval = ndi_devi_offline(cdip, flags);
14752 14776
14753 14777 return (rval);
14754 14778 }
14755 14779
14756 14780 static dev_info_t *
14757 14781 mptsas_find_child(dev_info_t *pdip, char *name)
14758 14782 {
14759 14783 dev_info_t *child = NULL;
14760 14784 char *rname = NULL;
14761 14785 int rval = DDI_FAILURE;
14762 14786
14763 14787 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14764 14788
14765 14789 child = ddi_get_child(pdip);
14766 14790 while (child) {
14767 14791 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
14768 14792 if (rval != DDI_SUCCESS) {
14769 14793 child = ddi_get_next_sibling(child);
14770 14794 bzero(rname, SCSI_MAXNAMELEN);
14771 14795 continue;
14772 14796 }
14773 14797
14774 14798 if (strcmp(rname, name) == 0) {
14775 14799 break;
14776 14800 }
14777 14801 child = ddi_get_next_sibling(child);
14778 14802 bzero(rname, SCSI_MAXNAMELEN);
14779 14803 }
14780 14804
14781 14805 kmem_free(rname, SCSI_MAXNAMELEN);
14782 14806
14783 14807 return (child);
14784 14808 }
14785 14809
14786 14810
14787 14811 static dev_info_t *
14788 14812 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
14789 14813 {
14790 14814 dev_info_t *child = NULL;
14791 14815 char *name = NULL;
14792 14816 char *addr = NULL;
14793 14817
14794 14818 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14795 14819 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14796 14820 (void) sprintf(name, "%016"PRIx64, sasaddr);
14797 14821 (void) sprintf(addr, "w%s,%x", name, lun);
14798 14822 child = mptsas_find_child(pdip, addr);
14799 14823 kmem_free(name, SCSI_MAXNAMELEN);
14800 14824 kmem_free(addr, SCSI_MAXNAMELEN);
14801 14825 return (child);
14802 14826 }
14803 14827
14804 14828 static dev_info_t *
14805 14829 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
14806 14830 {
14807 14831 dev_info_t *child;
14808 14832 char *addr;
14809 14833
14810 14834 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14811 14835 (void) sprintf(addr, "p%x,0", phy);
14812 14836 child = mptsas_find_child(pdip, addr);
14813 14837 kmem_free(addr, SCSI_MAXNAMELEN);
14814 14838 return (child);
14815 14839 }
14816 14840
14817 14841 static mdi_pathinfo_t *
14818 14842 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
14819 14843 {
14820 14844 mdi_pathinfo_t *path;
14821 14845 char *addr = NULL;
14822 14846
14823 14847 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14824 14848 (void) sprintf(addr, "p%x,0", phy);
14825 14849 path = mdi_pi_find(pdip, NULL, addr);
14826 14850 kmem_free(addr, SCSI_MAXNAMELEN);
14827 14851 return (path);
14828 14852 }
14829 14853
14830 14854 static mdi_pathinfo_t *
14831 14855 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
14832 14856 {
14833 14857 mdi_pathinfo_t *path;
14834 14858 char *name = NULL;
14835 14859 char *addr = NULL;
14836 14860
14837 14861 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14838 14862 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14839 14863 (void) sprintf(name, "%016"PRIx64, sasaddr);
14840 14864 (void) sprintf(addr, "w%s,%x", name, lun);
14841 14865 path = mdi_pi_find(parent, NULL, addr);
14842 14866 kmem_free(name, SCSI_MAXNAMELEN);
14843 14867 kmem_free(addr, SCSI_MAXNAMELEN);
14844 14868
14845 14869 return (path);
14846 14870 }
14847 14871
/*
 * Create the devinfo/pathinfo node(s) for one LUN of a target.
 *
 * If the device is eligible for multipathing, VPD page 0x83 is fetched
 * and a devid/GUID derived from it; with a valid GUID and MPxIO enabled
 * the LUN is created under scsi_vhci (mptsas_create_virt_lun), falling
 * back to a physical child node (mptsas_create_phys_lun) otherwise.
 *
 * Returns DDI_SUCCESS when a node was created/onlined, DDI_FAILURE
 * otherwise.  Note that `rval' is deliberately reused: whatever value
 * it holds on entry to the create_lun label is overwritten by the
 * create_virt_lun/create_phys_lun results.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int i = 0;
	uchar_t *inq83 = NULL;
	int inq83_len1 = 0xFF;
	int inq83_len = 0;
	int rval = DDI_FAILURE;
	ddi_devid_t devid;
	char *guid = NULL;
	int target = ptgt->m_devhdl;
	mdi_pathinfo_t *pip = NULL;
	mptsas_t *mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	/* Retry loop: one INQUIRY page 0x83 attempt per second. */
	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable escape hatch: optionally fall through
			 * to a physical bind even when page 0x83 fails.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	/* Reset before the create attempts below overwrite it. */
	rval = DDI_FAILURE;

create_lun:
	/* Prefer an MPxIO path when we have a GUID and MPxIO is enabled. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	/* Fall back to a physical child node under the iport. */
	if (rval != DDI_SUCCESS) {
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
14971 14995
14972 14996 static int
14973 14997 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
14974 14998 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
14975 14999 {
14976 15000 int target;
14977 15001 char *nodename = NULL;
14978 15002 char **compatible = NULL;
14979 15003 int ncompatible = 0;
14980 15004 int mdi_rtn = MDI_FAILURE;
14981 15005 int rval = DDI_FAILURE;
14982 15006 char *old_guid = NULL;
14983 15007 mptsas_t *mpt = DIP2MPT(pdip);
14984 15008 char *lun_addr = NULL;
14985 15009 char *wwn_str = NULL;
14986 15010 char *attached_wwn_str = NULL;
14987 15011 char *component = NULL;
14988 15012 uint8_t phy = 0xFF;
14989 15013 uint64_t sas_wwn;
14990 15014 int64_t lun64 = 0;
14991 15015 uint32_t devinfo;
14992 15016 uint16_t dev_hdl;
14993 15017 uint16_t pdev_hdl;
14994 15018 uint64_t dev_sas_wwn;
14995 15019 uint64_t pdev_sas_wwn;
14996 15020 uint32_t pdev_info;
14997 15021 uint8_t physport;
14998 15022 uint8_t phy_id;
14999 15023 uint32_t page_address;
15000 15024 uint16_t bay_num, enclosure, io_flags;
15001 15025 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
15002 15026 uint32_t dev_info;
15003 15027
15004 15028 mutex_enter(&mpt->m_mutex);
15005 15029 target = ptgt->m_devhdl;
15006 15030 sas_wwn = ptgt->m_addr.mta_wwn;
15007 15031 devinfo = ptgt->m_deviceinfo;
15008 15032 phy = ptgt->m_phynum;
15009 15033 mutex_exit(&mpt->m_mutex);
15010 15034
15011 15035 if (sas_wwn) {
15012 15036 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
15013 15037 } else {
15014 15038 *pip = mptsas_find_path_phy(pdip, phy);
15015 15039 }
15016 15040
15017 15041 if (*pip != NULL) {
15018 15042 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15019 15043 ASSERT(*lun_dip != NULL);
15020 15044 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
15021 15045 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
15022 15046 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
15023 15047 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
15024 15048 /*
15025 15049 * Same path back online again.
15026 15050 */
15027 15051 (void) ddi_prop_free(old_guid);
15028 15052 if ((!MDI_PI_IS_ONLINE(*pip)) &&
15029 15053 (!MDI_PI_IS_STANDBY(*pip)) &&
15030 15054 (ptgt->m_tgt_unconfigured == 0)) {
15031 15055 rval = mdi_pi_online(*pip, 0);
15032 15056 mutex_enter(&mpt->m_mutex);
15033 15057 ptgt->m_led_status = 0;
15034 15058 (void) mptsas_flush_led_status(mpt,
15035 15059 ptgt);
15036 15060 mutex_exit(&mpt->m_mutex);
15037 15061 } else {
15038 15062 rval = DDI_SUCCESS;
15039 15063 }
15040 15064 if (rval != DDI_SUCCESS) {
15041 15065 mptsas_log(mpt, CE_WARN, "path:target: "
15042 15066 "%x, lun:%x online failed!", target,
15043 15067 lun);
15044 15068 *pip = NULL;
15045 15069 *lun_dip = NULL;
15046 15070 }
15047 15071 return (rval);
15048 15072 } else {
15049 15073 /*
15050 15074 * The GUID of the LUN has changed which maybe
15051 15075 * because customer mapped another volume to the
15052 15076 * same LUN.
15053 15077 */
15054 15078 mptsas_log(mpt, CE_WARN, "The GUID of the "
15055 15079 "target:%x, lun:%x was changed, maybe "
15056 15080 "because someone mapped another volume "
15057 15081 "to the same LUN", target, lun);
15058 15082 (void) ddi_prop_free(old_guid);
15059 15083 if (!MDI_PI_IS_OFFLINE(*pip)) {
15060 15084 rval = mdi_pi_offline(*pip, 0);
15061 15085 if (rval != MDI_SUCCESS) {
15062 15086 mptsas_log(mpt, CE_WARN, "path:"
15063 15087 "target:%x, lun:%x offline "
15064 15088 "failed!", target, lun);
15065 15089 *pip = NULL;
15066 15090 *lun_dip = NULL;
15067 15091 return (DDI_FAILURE);
15068 15092 }
15069 15093 }
15070 15094 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
15071 15095 mptsas_log(mpt, CE_WARN, "path:target:"
15072 15096 "%x, lun:%x free failed!", target,
15073 15097 lun);
15074 15098 *pip = NULL;
15075 15099 *lun_dip = NULL;
15076 15100 return (DDI_FAILURE);
15077 15101 }
15078 15102 }
15079 15103 } else {
15080 15104 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
15081 15105 "property for path:target:%x, lun:%x", target, lun);
15082 15106 *pip = NULL;
15083 15107 *lun_dip = NULL;
15084 15108 return (DDI_FAILURE);
15085 15109 }
15086 15110 }
15087 15111 scsi_hba_nodename_compatible_get(inq, NULL,
15088 15112 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
15089 15113
15090 15114 /*
15091 15115 * if nodename can't be determined then print a message and skip it
15092 15116 */
15093 15117 if (nodename == NULL) {
15094 15118 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
15095 15119 "driver for target%d lun %d dtype:0x%02x", target, lun,
15096 15120 inq->inq_dtype);
15097 15121 return (DDI_FAILURE);
15098 15122 }
15099 15123
15100 15124 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15101 15125 /* The property is needed by MPAPI */
15102 15126 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15103 15127
15104 15128 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15105 15129 if (guid) {
15106 15130 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
15107 15131 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15108 15132 } else {
15109 15133 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
15110 15134 (void) sprintf(wwn_str, "p%x", phy);
15111 15135 }
15112 15136
15113 15137 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
15114 15138 guid, lun_addr, compatible, ncompatible,
15115 15139 0, pip);
15116 15140 if (mdi_rtn == MDI_SUCCESS) {
15117 15141
15118 15142 if (mdi_prop_update_string(*pip, MDI_GUID,
15119 15143 guid) != DDI_SUCCESS) {
15120 15144 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15121 15145 "create prop for target %d lun %d (MDI_GUID)",
15122 15146 target, lun);
15123 15147 mdi_rtn = MDI_FAILURE;
15124 15148 goto virt_create_done;
15125 15149 }
15126 15150
15127 15151 if (mdi_prop_update_int(*pip, LUN_PROP,
15128 15152 lun) != DDI_SUCCESS) {
15129 15153 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15130 15154 "create prop for target %d lun %d (LUN_PROP)",
15131 15155 target, lun);
15132 15156 mdi_rtn = MDI_FAILURE;
15133 15157 goto virt_create_done;
15134 15158 }
15135 15159 lun64 = (int64_t)lun;
15136 15160 if (mdi_prop_update_int64(*pip, LUN64_PROP,
15137 15161 lun64) != DDI_SUCCESS) {
15138 15162 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15139 15163 "create prop for target %d (LUN64_PROP)",
15140 15164 target);
15141 15165 mdi_rtn = MDI_FAILURE;
15142 15166 goto virt_create_done;
15143 15167 }
15144 15168 if (mdi_prop_update_string_array(*pip, "compatible",
15145 15169 compatible, ncompatible) !=
15146 15170 DDI_PROP_SUCCESS) {
15147 15171 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15148 15172 "create prop for target %d lun %d (COMPATIBLE)",
15149 15173 target, lun);
15150 15174 mdi_rtn = MDI_FAILURE;
15151 15175 goto virt_create_done;
15152 15176 }
15153 15177 if (sas_wwn && (mdi_prop_update_string(*pip,
15154 15178 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
15155 15179 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15156 15180 "create prop for target %d lun %d "
15157 15181 "(target-port)", target, lun);
15158 15182 mdi_rtn = MDI_FAILURE;
15159 15183 goto virt_create_done;
15160 15184 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
15161 15185 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
15162 15186 /*
15163 15187 * Direct attached SATA device without DeviceName
15164 15188 */
15165 15189 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15166 15190 "create prop for SAS target %d lun %d "
15167 15191 "(sata-phy)", target, lun);
15168 15192 mdi_rtn = MDI_FAILURE;
15169 15193 goto virt_create_done;
15170 15194 }
15171 15195 mutex_enter(&mpt->m_mutex);
15172 15196
15173 15197 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15174 15198 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15175 15199 (uint32_t)ptgt->m_devhdl;
15176 15200 rval = mptsas_get_sas_device_page0(mpt, page_address,
15177 15201 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
15178 15202 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15179 15203 if (rval != DDI_SUCCESS) {
15180 15204 mutex_exit(&mpt->m_mutex);
15181 15205 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15182 15206 "parent device for handle %d", page_address);
15183 15207 mdi_rtn = MDI_FAILURE;
15184 15208 goto virt_create_done;
15185 15209 }
15186 15210
15187 15211 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15188 15212 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15189 15213 rval = mptsas_get_sas_device_page0(mpt, page_address,
15190 15214 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15191 15215 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15192 15216 if (rval != DDI_SUCCESS) {
15193 15217 mutex_exit(&mpt->m_mutex);
15194 15218 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15195 15219 "device info for handle %d", page_address);
15196 15220 mdi_rtn = MDI_FAILURE;
15197 15221 goto virt_create_done;
15198 15222 }
15199 15223
15200 15224 mutex_exit(&mpt->m_mutex);
15201 15225
15202 15226 /*
15203 15227 * If this device direct attached to the controller
15204 15228 * set the attached-port to the base wwid
15205 15229 */
15206 15230 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15207 15231 != DEVINFO_DIRECT_ATTACHED) {
15208 15232 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15209 15233 pdev_sas_wwn);
15210 15234 } else {
15211 15235 /*
15212 15236 * Update the iport's attached-port to guid
15213 15237 */
15214 15238 if (sas_wwn == 0) {
15215 15239 (void) sprintf(wwn_str, "p%x", phy);
15216 15240 } else {
15217 15241 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15218 15242 }
15219 15243 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15220 15244 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15221 15245 DDI_PROP_SUCCESS) {
15222 15246 mptsas_log(mpt, CE_WARN,
15223 15247 "mptsas unable to create "
15224 15248 "property for iport target-port"
15225 15249 " %s (sas_wwn)",
15226 15250 wwn_str);
15227 15251 mdi_rtn = MDI_FAILURE;
15228 15252 goto virt_create_done;
15229 15253 }
15230 15254
15231 15255 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15232 15256 mpt->un.m_base_wwid);
15233 15257 }
15234 15258
15235 15259 if (mdi_prop_update_string(*pip,
15236 15260 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15237 15261 DDI_PROP_SUCCESS) {
15238 15262 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15239 15263 "property for iport attached-port %s (sas_wwn)",
15240 15264 attached_wwn_str);
15241 15265 mdi_rtn = MDI_FAILURE;
15242 15266 goto virt_create_done;
15243 15267 }
15244 15268
15245 15269
15246 15270 if (inq->inq_dtype == 0) {
15247 15271 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15248 15272 /*
15249 15273 * set obp path for pathinfo
15250 15274 */
15251 15275 (void) snprintf(component, MAXPATHLEN,
15252 15276 "disk@%s", lun_addr);
15253 15277
15254 15278 if (mdi_pi_pathname_obp_set(*pip, component) !=
15255 15279 DDI_SUCCESS) {
15256 15280 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15257 15281 "unable to set obp-path for object %s",
15258 15282 component);
15259 15283 mdi_rtn = MDI_FAILURE;
15260 15284 goto virt_create_done;
15261 15285 }
15262 15286 }
15263 15287
15264 15288 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15265 15289 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15266 15290 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15267 15291 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
15268 15292 "pm-capable", 1)) !=
15269 15293 DDI_PROP_SUCCESS) {
15270 15294 mptsas_log(mpt, CE_WARN, "mptsas driver"
15271 15295 "failed to create pm-capable "
15272 15296 "property, target %d", target);
15273 15297 mdi_rtn = MDI_FAILURE;
15274 15298 goto virt_create_done;
15275 15299 }
15276 15300 }
15277 15301 /*
15278 15302 * Create the phy-num property
15279 15303 */
15280 15304 if (mdi_prop_update_int(*pip, "phy-num",
15281 15305 ptgt->m_phynum) != DDI_SUCCESS) {
15282 15306 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15283 15307 "create phy-num property for target %d lun %d",
15284 15308 target, lun);
15285 15309 mdi_rtn = MDI_FAILURE;
15286 15310 goto virt_create_done;
15287 15311 }
15288 15312 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
15289 15313 mdi_rtn = mdi_pi_online(*pip, 0);
15290 15314 if (mdi_rtn == MDI_SUCCESS) {
15291 15315 mutex_enter(&mpt->m_mutex);
15292 15316 ptgt->m_led_status = 0;
15293 15317 (void) mptsas_flush_led_status(mpt, ptgt);
15294 15318 mutex_exit(&mpt->m_mutex);
15295 15319 }
15296 15320 if (mdi_rtn == MDI_NOT_SUPPORTED) {
15297 15321 mdi_rtn = MDI_FAILURE;
15298 15322 }
15299 15323 virt_create_done:
15300 15324 if (*pip && mdi_rtn != MDI_SUCCESS) {
15301 15325 (void) mdi_pi_free(*pip, 0);
15302 15326 *pip = NULL;
15303 15327 *lun_dip = NULL;
15304 15328 }
15305 15329 }
15306 15330
15307 15331 scsi_hba_nodename_compatible_free(nodename, compatible);
15308 15332 if (lun_addr != NULL) {
15309 15333 kmem_free(lun_addr, SCSI_MAXNAMELEN);
15310 15334 }
15311 15335 if (wwn_str != NULL) {
15312 15336 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15313 15337 }
15314 15338 if (component != NULL) {
15315 15339 kmem_free(component, MAXPATHLEN);
15316 15340 }
15317 15341
15318 15342 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15319 15343 }
15320 15344
15321 15345 static int
15322 15346 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
15323 15347 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
15324 15348 {
15325 15349 int target;
15326 15350 int rval;
15327 15351 int ndi_rtn = NDI_FAILURE;
15328 15352 uint64_t be_sas_wwn;
15329 15353 char *nodename = NULL;
15330 15354 char **compatible = NULL;
15331 15355 int ncompatible = 0;
15332 15356 int instance = 0;
15333 15357 mptsas_t *mpt = DIP2MPT(pdip);
15334 15358 char *wwn_str = NULL;
15335 15359 char *component = NULL;
15336 15360 char *attached_wwn_str = NULL;
15337 15361 uint8_t phy = 0xFF;
15338 15362 uint64_t sas_wwn;
15339 15363 uint32_t devinfo;
15340 15364 uint16_t dev_hdl;
15341 15365 uint16_t pdev_hdl;
15342 15366 uint64_t pdev_sas_wwn;
15343 15367 uint64_t dev_sas_wwn;
15344 15368 uint32_t pdev_info;
15345 15369 uint8_t physport;
15346 15370 uint8_t phy_id;
15347 15371 uint32_t page_address;
15348 15372 uint16_t bay_num, enclosure, io_flags;
15349 15373 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
15350 15374 uint32_t dev_info;
15351 15375 int64_t lun64 = 0;
15352 15376
15353 15377 mutex_enter(&mpt->m_mutex);
15354 15378 target = ptgt->m_devhdl;
15355 15379 sas_wwn = ptgt->m_addr.mta_wwn;
15356 15380 devinfo = ptgt->m_deviceinfo;
15357 15381 phy = ptgt->m_phynum;
15358 15382 mutex_exit(&mpt->m_mutex);
15359 15383
15360 15384 /*
15361 15385 * generate compatible property with binding-set "mpt"
15362 15386 */
15363 15387 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
15364 15388 &nodename, &compatible, &ncompatible);
15365 15389
15366 15390 /*
15367 15391 * if nodename can't be determined then print a message and skip it
15368 15392 */
15369 15393 if (nodename == NULL) {
15370 15394 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
15371 15395 "for target %d lun %d", target, lun);
15372 15396 return (DDI_FAILURE);
15373 15397 }
15374 15398
15375 15399 ndi_rtn = ndi_devi_alloc(pdip, nodename,
15376 15400 DEVI_SID_NODEID, lun_dip);
15377 15401
15378 15402 /*
15379 15403 * if lun alloc success, set props
15380 15404 */
15381 15405 if (ndi_rtn == NDI_SUCCESS) {
15382 15406
15383 15407 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15384 15408 *lun_dip, LUN_PROP, lun) !=
15385 15409 DDI_PROP_SUCCESS) {
15386 15410 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15387 15411 "property for target %d lun %d (LUN_PROP)",
15388 15412 target, lun);
15389 15413 ndi_rtn = NDI_FAILURE;
15390 15414 goto phys_create_done;
15391 15415 }
15392 15416
15393 15417 lun64 = (int64_t)lun;
15394 15418 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
15395 15419 *lun_dip, LUN64_PROP, lun64) !=
15396 15420 DDI_PROP_SUCCESS) {
15397 15421 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15398 15422 "property for target %d lun64 %d (LUN64_PROP)",
15399 15423 target, lun);
15400 15424 ndi_rtn = NDI_FAILURE;
15401 15425 goto phys_create_done;
15402 15426 }
15403 15427 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
15404 15428 *lun_dip, "compatible", compatible, ncompatible)
15405 15429 != DDI_PROP_SUCCESS) {
15406 15430 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15407 15431 "property for target %d lun %d (COMPATIBLE)",
15408 15432 target, lun);
15409 15433 ndi_rtn = NDI_FAILURE;
15410 15434 goto phys_create_done;
15411 15435 }
15412 15436
15413 15437 /*
15414 15438 * We need the SAS WWN for non-multipath devices, so
15415 15439 * we'll use the same property as that multipathing
15416 15440 * devices need to present for MPAPI. If we don't have
15417 15441 * a WWN (e.g. parallel SCSI), don't create the prop.
15418 15442 */
15419 15443 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15420 15444 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15421 15445 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
15422 15446 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
15423 15447 != DDI_PROP_SUCCESS) {
15424 15448 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15425 15449 "create property for SAS target %d lun %d "
15426 15450 "(target-port)", target, lun);
15427 15451 ndi_rtn = NDI_FAILURE;
15428 15452 goto phys_create_done;
15429 15453 }
15430 15454
15431 15455 be_sas_wwn = BE_64(sas_wwn);
15432 15456 if (sas_wwn && ndi_prop_update_byte_array(
15433 15457 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
15434 15458 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
15435 15459 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15436 15460 "create property for SAS target %d lun %d "
15437 15461 "(port-wwn)", target, lun);
15438 15462 ndi_rtn = NDI_FAILURE;
15439 15463 goto phys_create_done;
15440 15464 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
15441 15465 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
15442 15466 DDI_PROP_SUCCESS)) {
15443 15467 /*
15444 15468 * Direct attached SATA device without DeviceName
15445 15469 */
15446 15470 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15447 15471 "create property for SAS target %d lun %d "
15448 15472 "(sata-phy)", target, lun);
15449 15473 ndi_rtn = NDI_FAILURE;
15450 15474 goto phys_create_done;
15451 15475 }
15452 15476
15453 15477 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15454 15478 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
15455 15479 mptsas_log(mpt, CE_WARN, "mptsas unable to"
15456 15480 "create property for SAS target %d lun %d"
15457 15481 " (SAS_PROP)", target, lun);
15458 15482 ndi_rtn = NDI_FAILURE;
15459 15483 goto phys_create_done;
15460 15484 }
15461 15485 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
15462 15486 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
15463 15487 mptsas_log(mpt, CE_WARN, "mptsas unable "
15464 15488 "to create guid property for target %d "
15465 15489 "lun %d", target, lun);
15466 15490 ndi_rtn = NDI_FAILURE;
15467 15491 goto phys_create_done;
15468 15492 }
15469 15493
15470 15494 /*
15471 15495 * The following code is to set properties for SM-HBA support,
15472 15496 * it doesn't apply to RAID volumes
15473 15497 */
15474 15498 if (ptgt->m_addr.mta_phymask == 0)
15475 15499 goto phys_raid_lun;
15476 15500
15477 15501 mutex_enter(&mpt->m_mutex);
15478 15502
15479 15503 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15480 15504 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15481 15505 (uint32_t)ptgt->m_devhdl;
15482 15506 rval = mptsas_get_sas_device_page0(mpt, page_address,
15483 15507 &dev_hdl, &dev_sas_wwn, &dev_info,
15484 15508 &physport, &phy_id, &pdev_hdl,
15485 15509 &bay_num, &enclosure, &io_flags);
15486 15510 if (rval != DDI_SUCCESS) {
15487 15511 mutex_exit(&mpt->m_mutex);
15488 15512 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
15489 15513 "parent device for handle %d.", page_address);
15490 15514 ndi_rtn = NDI_FAILURE;
15491 15515 goto phys_create_done;
15492 15516 }
15493 15517
15494 15518 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15495 15519 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15496 15520 rval = mptsas_get_sas_device_page0(mpt, page_address,
15497 15521 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15498 15522 &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15499 15523 if (rval != DDI_SUCCESS) {
15500 15524 mutex_exit(&mpt->m_mutex);
15501 15525 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15502 15526 "device for handle %d.", page_address);
15503 15527 ndi_rtn = NDI_FAILURE;
15504 15528 goto phys_create_done;
15505 15529 }
15506 15530
15507 15531 mutex_exit(&mpt->m_mutex);
15508 15532
15509 15533 /*
15510 15534 * If this device direct attached to the controller
15511 15535 * set the attached-port to the base wwid
15512 15536 */
15513 15537 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15514 15538 != DEVINFO_DIRECT_ATTACHED) {
15515 15539 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15516 15540 pdev_sas_wwn);
15517 15541 } else {
15518 15542 /*
15519 15543 * Update the iport's attached-port to guid
15520 15544 */
15521 15545 if (sas_wwn == 0) {
15522 15546 (void) sprintf(wwn_str, "p%x", phy);
15523 15547 } else {
15524 15548 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15525 15549 }
15526 15550 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15527 15551 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15528 15552 DDI_PROP_SUCCESS) {
15529 15553 mptsas_log(mpt, CE_WARN,
15530 15554 "mptsas unable to create "
15531 15555 "property for iport target-port"
15532 15556 " %s (sas_wwn)",
15533 15557 wwn_str);
15534 15558 ndi_rtn = NDI_FAILURE;
15535 15559 goto phys_create_done;
15536 15560 }
15537 15561
15538 15562 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15539 15563 mpt->un.m_base_wwid);
15540 15564 }
15541 15565
15542 15566 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15543 15567 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15544 15568 DDI_PROP_SUCCESS) {
15545 15569 mptsas_log(mpt, CE_WARN,
15546 15570 "mptsas unable to create "
15547 15571 "property for iport attached-port %s (sas_wwn)",
15548 15572 attached_wwn_str);
15549 15573 ndi_rtn = NDI_FAILURE;
15550 15574 goto phys_create_done;
15551 15575 }
15552 15576
15553 15577 if (IS_SATA_DEVICE(dev_info)) {
15554 15578 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15555 15579 *lun_dip, MPTSAS_VARIANT, "sata") !=
15556 15580 DDI_PROP_SUCCESS) {
15557 15581 mptsas_log(mpt, CE_WARN,
15558 15582 "mptsas unable to create "
15559 15583 "property for device variant ");
15560 15584 ndi_rtn = NDI_FAILURE;
15561 15585 goto phys_create_done;
15562 15586 }
15563 15587 }
15564 15588
15565 15589 if (IS_ATAPI_DEVICE(dev_info)) {
15566 15590 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15567 15591 *lun_dip, MPTSAS_VARIANT, "atapi") !=
15568 15592 DDI_PROP_SUCCESS) {
15569 15593 mptsas_log(mpt, CE_WARN,
15570 15594 "mptsas unable to create "
15571 15595 "property for device variant ");
15572 15596 ndi_rtn = NDI_FAILURE;
15573 15597 goto phys_create_done;
15574 15598 }
15575 15599 }
15576 15600
15577 15601 phys_raid_lun:
15578 15602 /*
15579 15603 * if this is a SAS controller, and the target is a SATA
15580 15604 * drive, set the 'pm-capable' property for sd and if on
15581 15605 * an OPL platform, also check if this is an ATAPI
15582 15606 * device.
15583 15607 */
15584 15608 instance = ddi_get_instance(mpt->m_dip);
15585 15609 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15586 15610 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15587 15611 NDBG2(("mptsas%d: creating pm-capable property, "
15588 15612 "target %d", instance, target));
15589 15613
15590 15614 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
15591 15615 *lun_dip, "pm-capable", 1)) !=
15592 15616 DDI_PROP_SUCCESS) {
15593 15617 mptsas_log(mpt, CE_WARN, "mptsas "
15594 15618 "failed to create pm-capable "
15595 15619 "property, target %d", target);
15596 15620 ndi_rtn = NDI_FAILURE;
15597 15621 goto phys_create_done;
15598 15622 }
15599 15623
15600 15624 }
15601 15625
15602 15626 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
15603 15627 /*
15604 15628 * add 'obp-path' properties for devinfo
15605 15629 */
15606 15630 bzero(wwn_str, sizeof (wwn_str));
15607 15631 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15608 15632 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15609 15633 if (guid) {
15610 15634 (void) snprintf(component, MAXPATHLEN,
15611 15635 "disk@w%s,%x", wwn_str, lun);
15612 15636 } else {
15613 15637 (void) snprintf(component, MAXPATHLEN,
15614 15638 "disk@p%x,%x", phy, lun);
15615 15639 }
15616 15640 if (ddi_pathname_obp_set(*lun_dip, component)
15617 15641 != DDI_SUCCESS) {
15618 15642 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15619 15643 "unable to set obp-path for SAS "
15620 15644 "object %s", component);
15621 15645 ndi_rtn = NDI_FAILURE;
15622 15646 goto phys_create_done;
15623 15647 }
15624 15648 }
15625 15649 /*
15626 15650 * Create the phy-num property for non-raid disk
15627 15651 */
15628 15652 if (ptgt->m_addr.mta_phymask != 0) {
15629 15653 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15630 15654 *lun_dip, "phy-num", ptgt->m_phynum) !=
15631 15655 DDI_PROP_SUCCESS) {
15632 15656 mptsas_log(mpt, CE_WARN, "mptsas driver "
15633 15657 "failed to create phy-num property for "
15634 15658 "target %d", target);
15635 15659 ndi_rtn = NDI_FAILURE;
15636 15660 goto phys_create_done;
15637 15661 }
15638 15662 }
15639 15663 phys_create_done:
15640 15664 /*
15641 15665 * If props were setup ok, online the lun
15642 15666 */
15643 15667 if (ndi_rtn == NDI_SUCCESS) {
15644 15668 /*
15645 15669 * Try to online the new node
15646 15670 */
15647 15671 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
15648 15672 }
15649 15673 if (ndi_rtn == NDI_SUCCESS) {
15650 15674 mutex_enter(&mpt->m_mutex);
15651 15675 ptgt->m_led_status = 0;
15652 15676 (void) mptsas_flush_led_status(mpt, ptgt);
15653 15677 mutex_exit(&mpt->m_mutex);
15654 15678 }
15655 15679
15656 15680 /*
15657 15681 * If success set rtn flag, else unwire alloc'd lun
15658 15682 */
15659 15683 if (ndi_rtn != NDI_SUCCESS) {
15660 15684 NDBG12(("mptsas driver unable to online "
15661 15685 "target %d lun %d", target, lun));
15662 15686 ndi_prop_remove_all(*lun_dip);
15663 15687 (void) ndi_devi_free(*lun_dip);
15664 15688 *lun_dip = NULL;
15665 15689 }
15666 15690 }
15667 15691
15668 15692 scsi_hba_nodename_compatible_free(nodename, compatible);
15669 15693
15670 15694 if (wwn_str != NULL) {
15671 15695 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15672 15696 }
15673 15697 if (component != NULL) {
15674 15698 kmem_free(component, MAXPATHLEN);
15675 15699 }
15676 15700
15677 15701
15678 15702 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15679 15703 }
15680 15704
15681 15705 static int
15682 15706 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
15683 15707 {
15684 15708 mptsas_t *mpt = DIP2MPT(pdip);
15685 15709 struct smp_device smp_sd;
15686 15710
15687 15711 /* XXX An HBA driver should not be allocating an smp_device. */
15688 15712 bzero(&smp_sd, sizeof (struct smp_device));
15689 15713 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
15690 15714 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
15691 15715
15692 15716 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
15693 15717 return (NDI_FAILURE);
15694 15718 return (NDI_SUCCESS);
15695 15719 }
15696 15720
15697 15721 static int
15698 15722 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
15699 15723 {
15700 15724 mptsas_t *mpt = DIP2MPT(pdip);
15701 15725 mptsas_smp_t *psmp = NULL;
15702 15726 int rval;
15703 15727 int phymask;
15704 15728
15705 15729 /*
15706 15730 * Get the physical port associated to the iport
15707 15731 * PHYMASK TODO
15708 15732 */
15709 15733 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
15710 15734 "phymask", 0);
15711 15735 /*
15712 15736 * Find the smp node in hash table with specified sas address and
15713 15737 * physical port
15714 15738 */
15715 15739 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
15716 15740 if (psmp == NULL) {
15717 15741 return (DDI_FAILURE);
15718 15742 }
15719 15743
15720 15744 rval = mptsas_online_smp(pdip, psmp, smp_dip);
15721 15745
15722 15746 return (rval);
15723 15747 }
15724 15748
/*
 * Allocate, decorate and online a devinfo "smp" child node for the given
 * expander under this iport.  On success the new node is returned via
 * smp_dip and DDI_SUCCESS is returned; any failure after allocation
 * funnels through smp_create_done, where the node's properties are
 * removed and the node is freed.  If a child for this WWN already
 * exists, it is returned immediately.
 */
static int
mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
    dev_info_t **smp_dip)
{
	char		wwn_str[MPTSAS_WWN_STRLEN];
	char		attached_wwn_str[MPTSAS_WWN_STRLEN];
	int		ndi_rtn = NDI_FAILURE;
	int		rval = 0;
	mptsas_smp_t	dev_info;
	uint32_t	page_address;
	mptsas_t	*mpt = DIP2MPT(pdip);
	uint16_t	dev_hdl;
	uint64_t	sas_wwn;
	uint64_t	smp_sas_wwn;
	uint8_t		physport;
	uint8_t		phy_id;
	uint16_t	pdev_hdl;
	uint8_t		numphys = 0;
	uint16_t	i = 0;
	char		phymask[MPTSAS_MAX_PHYS];
	char		*iport = NULL;
	mptsas_phymask_t	phy_mask = 0;
	uint16_t	attached_devhdl;
	uint16_t	bay_num, enclosure, io_flags;

	/* Bare-hex WWN (no "w" prefix) is the unit-address/SMP_WWN form. */
	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);

	/*
	 * Probe smp device, prevent the node of removed device from being
	 * configured succesfully
	 */
	if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Already configured?  Reuse the existing child node. */
	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
		return (DDI_SUCCESS);
	}

	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);

	/*
	 * if lun alloc success, set props
	 */
	if (ndi_rtn == NDI_SUCCESS) {
		/*
		 * Set the flavor of the child to be SMP flavored
		 */
		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SMP_WWN, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp device %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		/* From here on wwn_str carries the "w"-prefixed form. */
		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for iport target-port %s (sas_wwn)",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		mutex_enter(&mpt->m_mutex);

		/* Expander page0 yields the parent device handle. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address,
		    &dev_info);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN,
			    "mptsas unable to get expander "
			    "parent device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* Parent's device page0: sas_wwn is the parent's address. */
		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_pdevhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo, &physport,
		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/* The expander's own device page0 refreshes m_deviceinfo. */
		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
		    (uint32_t)dev_info.m_devhdl;
		rval = mptsas_get_sas_device_page0(mpt, page_address,
		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure,
		    &io_flags);
		if (rval != DDI_SUCCESS) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for %x", page_address);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		/*
		 * If this smp direct attached to the controller
		 * set the attached-port to the base wwid
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    sas_wwn);
		} else {
			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
			    mpt->un.m_base_wwid);
		}

		if (ndi_prop_update_string(DDI_DEV_T_NONE,
		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
		    DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
			    "property for smp attached-port %s (sas_wwn)",
			    attached_wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
			    "create property for SMP %s (SMP_PROP) ",
			    wwn_str);
			ndi_rtn = NDI_FAILURE;
			goto smp_create_done;
		}

		/*
		 * check the smp to see whether it direct
		 * attached to the controller
		 */
		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
		    != DEVINFO_DIRECT_ATTACHED) {
			goto smp_create_done;
		}
		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
		if (numphys > 0) {
			goto smp_create_done;
		}
		/*
		 * this iport is an old iport, we need to
		 * reconfig the props for it.
		 */
		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_VIRTUAL_PORT, 0) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_VIRTUAL_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
			    "prop update failed");
			goto smp_create_done;
		}

		/* Recover this iport's phymask from its unit-address. */
		mutex_enter(&mpt->m_mutex);
		numphys = 0;
		iport = ddi_get_name_addr(pdip);
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			bzero(phymask, sizeof (phymask));
			(void) sprintf(phymask,
			    "%x", mpt->m_phy_info[i].phy_mask);
			if (strcmp(phymask, iport) == 0) {
				phy_mask = mpt->m_phy_info[i].phy_mask;
				break;
			}
		}

		/* Count the phys belonging to this port. */
		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
			if ((phy_mask >> i) & 0x01) {
				numphys++;
			}
		}
		/*
		 * Update PHY info for smhba
		 */
		if (mptsas_smhba_phy_init(mpt)) {
			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas phy update "
			    "failed");
			goto smp_create_done;
		}
		mutex_exit(&mpt->m_mutex);

		mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
		    &attached_devhdl);

		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
		    MPTSAS_NUM_PHYS, numphys) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    MPTSAS_NUM_PHYS);
			mptsas_log(mpt, CE_WARN, "mptsas update "
			    "num phys props failed");
			goto smp_create_done;
		}
		/*
		 * Add parent's props for SMHBA support
		 */
		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
		    DDI_PROP_SUCCESS) {
			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
			    SCSI_ADDR_PROP_ATTACHED_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas update iport"
			    "attached-port failed");
			goto smp_create_done;
		}

smp_create_done:
		/*
		 * If props were setup ok, online the lun
		 */
		if (ndi_rtn == NDI_SUCCESS) {
			/*
			 * Try to online the new node
			 */
			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
		}

		/*
		 * If success set rtn flag, else unwire alloc'd lun
		 */
		if (ndi_rtn != NDI_SUCCESS) {
			NDBG12(("mptsas unable to online "
			    "SMP target %s", wwn_str));
			ndi_prop_remove_all(*smp_dip);
			(void) ndi_devi_free(*smp_dip);
		}
	}

	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}
15978 16002
/* smp transport routine */
/*
 * SMP transport entry point: translate an smp_pkt from the SMP framework
 * into an MPI2 SMP_PASSTHROUGH request, issue it synchronously via
 * mptsas_do_passthru(), and map IOC/SAS status back into smp_pkt_reason
 * errno values.  Returns DDI_SUCCESS only when both the IOC status and
 * the SAS status report success.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t wwn;
	Mpi2SmpPassthroughRequest_t req;
	Mpi2SmpPassthroughReply_t rep;
	uint32_t direction = 0;
	mptsas_t *mpt;
	int ret;
	uint64_t tmp64;

	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is a uint16_t; reject sizes that won't fit. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/* The 4-byte SMP CRC is not counted in the MPI data length. */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	/*
	 * NOTE(review): smp_pkt_reqsize/smp_pkt_rspsize are assumed to be
	 * >= 4 here (the "- 4" below would otherwise underflow) — presumably
	 * guaranteed by the SMP framework; confirm against smp_transport().
	 */
	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
16063 16087
16064 16088 /*
16065 16089 * If we didn't get a match, we need to get sas page0 for each device, and
16066 16090 * untill we get a match. If failed, return NULL
16067 16091 */
16068 16092 static mptsas_target_t *
16069 16093 mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
16070 16094 {
16071 16095 int i, j = 0;
16072 16096 int rval = 0;
16073 16097 uint16_t cur_handle;
16074 16098 uint32_t page_address;
16075 16099 mptsas_target_t *ptgt = NULL;
16076 16100
16077 16101 /*
16078 16102 * PHY named device must be direct attached and attaches to
16079 16103 * narrow port, if the iport is not parent of the device which
16080 16104 * we are looking for.
16081 16105 */
16082 16106 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16083 16107 if ((1 << i) & phymask)
16084 16108 j++;
16085 16109 }
16086 16110
16087 16111 if (j > 1)
16088 16112 return (NULL);
16089 16113
16090 16114 /*
16091 16115 * Must be a narrow port and single device attached to the narrow port
16092 16116 * So the physical port num of device which is equal to the iport's
16093 16117 * port num is the device what we are looking for.
16094 16118 */
16095 16119
16096 16120 if (mpt->m_phy_info[phy].phy_mask != phymask)
16097 16121 return (NULL);
16098 16122
16099 16123 mutex_enter(&mpt->m_mutex);
16100 16124
16101 16125 ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
16102 16126 &phy);
16103 16127 if (ptgt != NULL) {
16104 16128 mutex_exit(&mpt->m_mutex);
16105 16129 return (ptgt);
16106 16130 }
16107 16131
16108 16132 if (mpt->m_done_traverse_dev) {
16109 16133 mutex_exit(&mpt->m_mutex);
16110 16134 return (NULL);
16111 16135 }
16112 16136
16113 16137 /* If didn't get a match, come here */
16114 16138 cur_handle = mpt->m_dev_handle;
16115 16139 for (; ; ) {
16116 16140 ptgt = NULL;
16117 16141 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
16118 16142 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
16119 16143 rval = mptsas_get_target_device_info(mpt, page_address,
16120 16144 &cur_handle, &ptgt);
16121 16145 if ((rval == DEV_INFO_FAIL_PAGE0) ||
16122 16146 (rval == DEV_INFO_FAIL_ALLOC)) {
16123 16147 break;
16124 16148 }
16125 16149 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
16126 16150 (rval == DEV_INFO_PHYS_DISK)) {
16127 16151 continue;
16128 16152 }
16129 16153 mpt->m_dev_handle = cur_handle;
16130 16154
16131 16155 if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
16132 16156 break;
16133 16157 }
16134 16158 }
16135 16159
16136 16160 mutex_exit(&mpt->m_mutex);
16137 16161 return (ptgt);
16138 16162 }
16139 16163
16140 16164 /*
16141 16165 * The ptgt->m_addr.mta_wwn contains the wwid for each disk.
16142 16166 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
16143 16167 * If we didn't get a match, we need to get sas page0 for each device, and
16144 16168 * untill we get a match
16145 16169 * If failed, return NULL
16146 16170 */
16147 16171 static mptsas_target_t *
16148 16172 mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
16149 16173 {
16150 16174 int rval = 0;
16151 16175 uint16_t cur_handle;
16152 16176 uint32_t page_address;
16153 16177 mptsas_target_t *tmp_tgt = NULL;
16154 16178 mptsas_target_addr_t addr;
16155 16179
16156 16180 addr.mta_wwn = wwid;
16157 16181 addr.mta_phymask = phymask;
16158 16182 mutex_enter(&mpt->m_mutex);
16159 16183 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16160 16184 if (tmp_tgt != NULL) {
16161 16185 mutex_exit(&mpt->m_mutex);
16162 16186 return (tmp_tgt);
16163 16187 }
16164 16188
16165 16189 if (phymask == 0) {
16166 16190 /*
16167 16191 * It's IR volume
16168 16192 */
16169 16193 rval = mptsas_get_raid_info(mpt);
16170 16194 if (rval) {
16171 16195 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16172 16196 }
16173 16197 mutex_exit(&mpt->m_mutex);
16174 16198 return (tmp_tgt);
16175 16199 }
16176 16200
16177 16201 if (mpt->m_done_traverse_dev) {
16178 16202 mutex_exit(&mpt->m_mutex);
16179 16203 return (NULL);
16180 16204 }
16181 16205
16182 16206 /* If didn't get a match, come here */
16183 16207 cur_handle = mpt->m_dev_handle;
16184 16208 for (;;) {
16185 16209 tmp_tgt = NULL;
16186 16210 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
16187 16211 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
16188 16212 rval = mptsas_get_target_device_info(mpt, page_address,
16189 16213 &cur_handle, &tmp_tgt);
16190 16214 if ((rval == DEV_INFO_FAIL_PAGE0) ||
16191 16215 (rval == DEV_INFO_FAIL_ALLOC)) {
16192 16216 tmp_tgt = NULL;
16193 16217 break;
16194 16218 }
16195 16219 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
16196 16220 (rval == DEV_INFO_PHYS_DISK)) {
16197 16221 continue;
16198 16222 }
16199 16223 mpt->m_dev_handle = cur_handle;
16200 16224 if ((tmp_tgt->m_addr.mta_wwn) &&
16201 16225 (tmp_tgt->m_addr.mta_wwn == wwid) &&
16202 16226 (tmp_tgt->m_addr.mta_phymask == phymask)) {
16203 16227 break;
16204 16228 }
16205 16229 }
16206 16230
16207 16231 mutex_exit(&mpt->m_mutex);
16208 16232 return (tmp_tgt);
16209 16233 }
16210 16234
16211 16235 static mptsas_smp_t *
16212 16236 mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
16213 16237 {
16214 16238 int rval = 0;
16215 16239 uint16_t cur_handle;
16216 16240 uint32_t page_address;
16217 16241 mptsas_smp_t smp_node, *psmp = NULL;
16218 16242 mptsas_target_addr_t addr;
16219 16243
16220 16244 addr.mta_wwn = wwid;
16221 16245 addr.mta_phymask = phymask;
16222 16246 mutex_enter(&mpt->m_mutex);
16223 16247 psmp = refhash_lookup(mpt->m_smp_targets, &addr);
16224 16248 if (psmp != NULL) {
16225 16249 mutex_exit(&mpt->m_mutex);
16226 16250 return (psmp);
16227 16251 }
16228 16252
16229 16253 if (mpt->m_done_traverse_smp) {
16230 16254 mutex_exit(&mpt->m_mutex);
16231 16255 return (NULL);
16232 16256 }
16233 16257
16234 16258 /* If didn't get a match, come here */
16235 16259 cur_handle = mpt->m_smp_devhdl;
16236 16260 for (;;) {
16237 16261 psmp = NULL;
16238 16262 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
16239 16263 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
16240 16264 rval = mptsas_get_sas_expander_page0(mpt, page_address,
16241 16265 &smp_node);
16242 16266 if (rval != DDI_SUCCESS) {
16243 16267 break;
16244 16268 }
16245 16269 mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
16246 16270 psmp = mptsas_smp_alloc(mpt, &smp_node);
16247 16271 ASSERT(psmp);
16248 16272 if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
16249 16273 (psmp->m_addr.mta_phymask == phymask)) {
16250 16274 break;
16251 16275 }
16252 16276 }
16253 16277
16254 16278 mutex_exit(&mpt->m_mutex);
16255 16279 return (psmp);
16256 16280 }
16257 16281
16258 16282 mptsas_target_t *
16259 16283 mptsas_tgt_alloc(mptsas_t *mpt, uint16_t devhdl, uint64_t wwid,
16260 16284 uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
16261 16285 {
16262 16286 mptsas_target_t *tmp_tgt = NULL;
16263 16287 mptsas_target_addr_t addr;
16264 16288
16265 16289 addr.mta_wwn = wwid;
16266 16290 addr.mta_phymask = phymask;
16267 16291 tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16268 16292 if (tmp_tgt != NULL) {
16269 16293 NDBG20(("Hash item already exist"));
16270 16294 tmp_tgt->m_deviceinfo = devinfo;
16271 16295 tmp_tgt->m_devhdl = devhdl; /* XXX - duplicate? */
16272 16296 return (tmp_tgt);
16273 16297 }
16274 16298 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
16275 16299 if (tmp_tgt == NULL) {
16276 16300 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
16277 16301 return (NULL);
16278 16302 }
16279 16303 tmp_tgt->m_devhdl = devhdl;
16280 16304 tmp_tgt->m_addr.mta_wwn = wwid;
16281 16305 tmp_tgt->m_deviceinfo = devinfo;
16282 16306 tmp_tgt->m_addr.mta_phymask = phymask;
16283 16307 tmp_tgt->m_phynum = phynum;
16284 16308 /* Initialized the tgt structure */
16285 16309 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
16286 16310 tmp_tgt->m_qfull_retry_interval =
16287 16311 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
16288 16312 tmp_tgt->m_t_throttle = MAX_THROTTLE;
16289 16313 TAILQ_INIT(&tmp_tgt->m_active_cmdq);
16290 16314
16291 16315 refhash_insert(mpt->m_targets, tmp_tgt);
16292 16316
16293 16317 return (tmp_tgt);
16294 16318 }
16295 16319
16296 16320 static void
16297 16321 mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
16298 16322 {
16299 16323 dst->m_devhdl = src->m_devhdl;
16300 16324 dst->m_deviceinfo = src->m_deviceinfo;
16301 16325 dst->m_pdevhdl = src->m_pdevhdl;
16302 16326 dst->m_pdevinfo = src->m_pdevinfo;
16303 16327 }
16304 16328
16305 16329 static mptsas_smp_t *
16306 16330 mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
16307 16331 {
16308 16332 mptsas_target_addr_t addr;
16309 16333 mptsas_smp_t *ret_data;
16310 16334
16311 16335 addr.mta_wwn = data->m_addr.mta_wwn;
16312 16336 addr.mta_phymask = data->m_addr.mta_phymask;
16313 16337 ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
16314 16338 /*
16315 16339 * If there's already a matching SMP target, update its fields
16316 16340 * in place. Since the address is not changing, it's safe to do
16317 16341 * this. We cannot just bcopy() here because the structure we've
16318 16342 * been given has invalid hash links.
16319 16343 */
16320 16344 if (ret_data != NULL) {
16321 16345 mptsas_smp_target_copy(data, ret_data);
16322 16346 return (ret_data);
16323 16347 }
16324 16348
16325 16349 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
16326 16350 bcopy(data, ret_data, sizeof (mptsas_smp_t));
16327 16351 refhash_insert(mpt->m_smp_targets, ret_data);
16328 16352 return (ret_data);
16329 16353 }
16330 16354
16331 16355 /*
16332 16356 * Functions for SGPIO LED support
16333 16357 */
16334 16358 static dev_info_t *
16335 16359 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
16336 16360 {
16337 16361 dev_info_t *dip;
16338 16362 int prop;
16339 16363 dip = e_ddi_hold_devi_by_dev(dev, 0);
16340 16364 if (dip == NULL)
16341 16365 return (dip);
16342 16366 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
16343 16367 "phymask", 0);
16344 16368 *phymask = (mptsas_phymask_t)prop;
16345 16369 ddi_release_devi(dip);
16346 16370 return (dip);
16347 16371 }
16348 16372 static mptsas_target_t *
16349 16373 mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16350 16374 {
16351 16375 uint8_t phynum;
16352 16376 uint64_t wwn;
16353 16377 int lun;
16354 16378 mptsas_target_t *ptgt = NULL;
16355 16379
16356 16380 if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16357 16381 return (NULL);
16358 16382 }
16359 16383 if (addr[0] == 'w') {
16360 16384 ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16361 16385 } else {
16362 16386 ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16363 16387 }
16364 16388 return (ptgt);
16365 16389 }
16366 16390
16367 16391 static int
16368 16392 mptsas_flush_led_status(mptsas_t *mpt, mptsas_target_t *ptgt)
16369 16393 {
16370 16394 uint32_t slotstatus = 0;
16371 16395
16372 16396 /* Build an MPI2 Slot Status based on our view of the world */
16373 16397 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16374 16398 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16375 16399 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16376 16400 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16377 16401 if (ptgt->m_led_status & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16378 16402 slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
16379 16403
16380 16404 /* Write it to the controller */
16381 16405 NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16382 16406 slotstatus, ptgt->m_slot_num));
16383 16407 return (mptsas_send_sep(mpt, ptgt, &slotstatus,
16384 16408 MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16385 16409 }
16386 16410
/*
 * send sep request, use enclosure/slot addressing
 *
 * Issues a SCSI Enclosure Processor request for the given target via the
 * HBA passthrough interface.  For MPI2_SEP_REQ_ACTION_WRITE_STATUS the
 * word in *status is written to the slot; for other (read) actions the
 * slot status returned by the IOC is stored back into *status.  Returns
 * 0 on success or an errno value.  Must be called with m_mutex held.
 */
static int
mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
    uint32_t *status, uint8_t act)
{
	Mpi2SepRequest_t req;
	Mpi2SepReply_t rep;
	int ret;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * We only support SEP control of directly-attached targets, in which
	 * case the "SEP" we're talking to is a virtual one contained within
	 * the HBA itself. This is necessary because DA targets typically have
	 * no other mechanism for LED control. Targets for which a separate
	 * enclosure service processor exists should be controlled via ses(7d)
	 * or sgen(7d). Furthermore, since such requests can time out, they
	 * should be made in user context rather than in response to
	 * asynchronous fabric changes.
	 *
	 * In addition, we do not support this operation for RAID volumes,
	 * since there is no slot associated with them.
	 */
	if (!(ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED) ||
	    ptgt->m_addr.mta_phymask == 0) {
		return (ENOTTY);
	}

	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));

	/* Address the slot by enclosure handle + slot number. */
	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	req.Action = act;
	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	req.EnclosureHandle = LE_16(ptgt->m_enclosure);
	req.Slot = LE_16(ptgt->m_slot_num);
	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		req.SlotStatus = LE_32(*status);
	}
	/* 60-second timeout; FKIOCTL marks the buffers as kernel-resident. */
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
	    sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
	if (ret != 0) {
		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
		    "Processor Request message error %d", ret);
		return (ret);
	}
	/* do passthrough success, check the ioc status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
		    "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
		    LE_32(rep.IOCLogInfo));
		/* Map the MPI2 IOC status code onto a reasonable errno. */
		switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
		case MPI2_IOCSTATUS_INVALID_FUNCTION:
		case MPI2_IOCSTATUS_INVALID_VPID:
		case MPI2_IOCSTATUS_INVALID_FIELD:
		case MPI2_IOCSTATUS_INVALID_STATE:
		case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
			return (EINVAL);
		case MPI2_IOCSTATUS_BUSY:
			return (EBUSY);
		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
			return (EAGAIN);
		case MPI2_IOCSTATUS_INVALID_SGL:
		case MPI2_IOCSTATUS_INTERNAL_ERROR:
		case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		default:
			return (EIO);
		}
	}
	/* For read actions, hand the returned slot status back to caller. */
	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
		*status = LE_32(rep.SlotStatus);
	}

	return (0);
}
16470 16494
16471 16495 int
16472 16496 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
16473 16497 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
16474 16498 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
16475 16499 {
16476 16500 ddi_dma_cookie_t new_cookie;
16477 16501 size_t alloc_len;
16478 16502 uint_t ncookie;
16479 16503
16480 16504 if (cookiep == NULL)
16481 16505 cookiep = &new_cookie;
16482 16506
16483 16507 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
16484 16508 NULL, dma_hdp) != DDI_SUCCESS) {
16485 16509 return (FALSE);
16486 16510 }
16487 16511
16488 16512 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
16489 16513 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
16490 16514 acc_hdp) != DDI_SUCCESS) {
16491 16515 ddi_dma_free_handle(dma_hdp);
16492 16516 *dma_hdp = NULL;
16493 16517 return (FALSE);
16494 16518 }
16495 16519
16496 16520 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
16497 16521 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
16498 16522 cookiep, &ncookie) != DDI_DMA_MAPPED) {
16499 16523 (void) ddi_dma_mem_free(acc_hdp);
16500 16524 ddi_dma_free_handle(dma_hdp);
16501 16525 *dma_hdp = NULL;
16502 16526 return (FALSE);
16503 16527 }
16504 16528
16505 16529 return (TRUE);
16506 16530 }
16507 16531
16508 16532 void
16509 16533 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
16510 16534 {
16511 16535 if (*dma_hdp == NULL)
16512 16536 return;
16513 16537
16514 16538 (void) ddi_dma_unbind_handle(*dma_hdp);
16515 16539 (void) ddi_dma_mem_free(acc_hdp);
16516 16540 ddi_dma_free_handle(dma_hdp);
16517 16541 *dma_hdp = NULL;
16518 16542 }
↓ open down ↓ |
3769 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX