Print this page
XXXX Nexenta fixes for mpt_sas(7d)
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
+++ new/usr/src/uts/common/io/scsi/adapters/mpt_sas/mptsas.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 2000 to 2010, LSI Corporation.
29 29 * All rights reserved.
30 30 *
31 31 * Redistribution and use in source and binary forms of all code within
32 32 * this file that is exclusively owned by LSI, with or without
33 33 * modification, is permitted provided that, in addition to the CDDL 1.0
34 34 * License requirements, the following conditions are met:
35 35 *
36 36 * Neither the name of the author nor the names of its contributors may be
37 37 * used to endorse or promote products derived from this software without
38 38 * specific prior written permission.
39 39 *
40 40 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
41 41 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
42 42 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
43 43 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
44 44 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
45 45 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
46 46 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
47 47 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
48 48 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
49 49 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
50 50 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
51 51 * DAMAGE.
52 52 */
53 53
54 54 /*
55 55 * mptsas - This is a driver based on LSI Logic's MPT2.0 interface.
56 56 *
57 57 */
58 58
59 59 #if defined(lint) || defined(DEBUG)
↓ open down ↓ |
59 lines elided |
↑ open up ↑ |
60 60 #define MPTSAS_DEBUG
61 61 #endif
62 62
63 63 /*
64 64 * standard header files.
65 65 */
66 66 #include <sys/note.h>
67 67 #include <sys/scsi/scsi.h>
68 68 #include <sys/pci.h>
69 69 #include <sys/file.h>
70 -#include <sys/cpuvar.h>
71 70 #include <sys/policy.h>
72 71 #include <sys/sysevent.h>
73 72 #include <sys/sysevent/eventdefs.h>
74 73 #include <sys/sysevent/dr.h>
75 74 #include <sys/sata/sata_defs.h>
76 75 #include <sys/scsi/generic/sas.h>
77 76 #include <sys/scsi/impl/scsi_sas.h>
78 77
79 78 #pragma pack(1)
80 79 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_type.h>
81 80 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2.h>
82 81 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_cnfg.h>
83 82 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_init.h>
84 83 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_ioc.h>
85 84 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_sas.h>
86 85 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_tool.h>
87 86 #include <sys/scsi/adapters/mpt_sas/mpi/mpi2_raid.h>
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
88 87 #pragma pack()
89 88
90 89 /*
91 90 * private header files.
92 91 *
93 92 */
94 93 #include <sys/scsi/impl/scsi_reset_notify.h>
95 94 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
96 95 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
97 96 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
98 -
99 97 #include <sys/raidioctl.h>
100 98
101 99 #include <sys/fs/dv_node.h> /* devfs_clean */
102 100
103 101 /*
104 102 * FMA header files
105 103 */
106 104 #include <sys/ddifm.h>
107 105 #include <sys/fm/protocol.h>
108 106 #include <sys/fm/util.h>
109 107 #include <sys/fm/io/ddi.h>
110 108
111 109 /*
112 - * For anyone who would modify the code in mptsas_driver, it must be awared
113 - * that from snv_145 where CR6910752(mpt_sas driver performance can be
114 - * improved) is integrated, the per_instance mutex m_mutex is not hold
115 - * in the key IO code path, including mptsas_scsi_start(), mptsas_intr()
116 - * and all of the recursive functions called in them, so don't
117 - * make it for granted that all operations are sync/exclude correctly. Before
118 - * doing any modification in key code path, and even other code path such as
119 - * DR, watchsubr, ioctl, passthrough etc, make sure the elements modified have
120 - * no releationship to elements shown in the fastpath
121 - * (function mptsas_handle_io_fastpath()) in ISR and its recursive functions.
122 - * otherwise, you have to use the new introduced mutex to protect them.
123 - * As to how to do correctly, refer to the comments in mptsas_intr().
124 - */
125 -
126 -/*
127 110 * autoconfiguration data and routines.
128 111 */
129 112 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
130 113 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
131 114 static int mptsas_power(dev_info_t *dip, int component, int level);
132 115
133 116 /*
134 117 * cb_ops function
135 118 */
136 119 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
137 120 cred_t *credp, int *rval);
138 121 #ifdef __sparc
139 122 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
140 123 #else /* __sparc */
141 124 static int mptsas_quiesce(dev_info_t *devi);
142 125 #endif /* __sparc */
143 126
144 127 /*
145 128  * Resource initialization for hardware
146 129 */
147 130 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
148 131 static void mptsas_disable_bus_master(mptsas_t *mpt);
149 132 static void mptsas_hba_fini(mptsas_t *mpt);
150 133 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
151 134 static int mptsas_hba_setup(mptsas_t *mpt);
152 135 static void mptsas_hba_teardown(mptsas_t *mpt);
153 136 static int mptsas_config_space_init(mptsas_t *mpt);
154 137 static void mptsas_config_space_fini(mptsas_t *mpt);
155 138 static void mptsas_iport_register(mptsas_t *mpt);
156 139 static int mptsas_smp_setup(mptsas_t *mpt);
157 140 static void mptsas_smp_teardown(mptsas_t *mpt);
158 141 static int mptsas_cache_create(mptsas_t *mpt);
159 142 static void mptsas_cache_destroy(mptsas_t *mpt);
160 143 static int mptsas_alloc_request_frames(mptsas_t *mpt);
161 144 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
162 145 static int mptsas_alloc_free_queue(mptsas_t *mpt);
163 146 static int mptsas_alloc_post_queue(mptsas_t *mpt);
164 147 static void mptsas_alloc_reply_args(mptsas_t *mpt);
165 148 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
166 149 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
167 150 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
168 151
169 152 /*
170 153 * SCSA function prototypes
171 154 */
172 155 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
173 156 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
174 157 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
175 158 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
176 159 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
177 160 int tgtonly);
178 161 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
179 162 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
180 163 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
181 164 int tgtlen, int flags, int (*callback)(), caddr_t arg);
182 165 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
183 166 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
184 167 struct scsi_pkt *pkt);
185 168 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
186 169 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
187 170 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
188 171 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
189 172 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
190 173 void (*callback)(caddr_t), caddr_t arg);
191 174 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
192 175 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
193 176 static int mptsas_scsi_quiesce(dev_info_t *dip);
194 177 static int mptsas_scsi_unquiesce(dev_info_t *dip);
195 178 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
196 179 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
197 180
198 181 /*
199 182 * SMP functions
200 183 */
201 184 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
202 185
203 186 /*
204 187 * internal function prototypes.
205 188 */
206 189 static void mptsas_list_add(mptsas_t *mpt);
207 190 static void mptsas_list_del(mptsas_t *mpt);
208 191
↓ open down ↓ |
72 lines elided |
↑ open up ↑ |
209 192 static int mptsas_quiesce_bus(mptsas_t *mpt);
210 193 static int mptsas_unquiesce_bus(mptsas_t *mpt);
211 194
212 195 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
213 196 static void mptsas_free_handshake_msg(mptsas_t *mpt);
214 197
215 198 static void mptsas_ncmds_checkdrain(void *arg);
216 199
217 200 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
218 201 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
202 +static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
203 +static void mptsas_accept_tx_waitq(mptsas_t *mpt);
219 204
220 205 static int mptsas_do_detach(dev_info_t *dev);
221 206 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
222 207 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
223 208 struct scsi_pkt *pkt);
224 209 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
225 210
226 211 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
227 212 static void mptsas_handle_event(void *args);
228 213 static int mptsas_handle_event_sync(void *args);
229 214 static void mptsas_handle_dr(void *args);
230 215 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
231 216 dev_info_t *pdip);
232 217
233 218 static void mptsas_restart_cmd(void *);
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
234 219
235 220 static void mptsas_flush_hba(mptsas_t *mpt);
236 221 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
237 222 uint8_t tasktype);
238 223 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
239 224 uchar_t reason, uint_t stat);
240 225
241 226 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
242 227 static void mptsas_process_intr(mptsas_t *mpt,
243 228 pMpi2ReplyDescriptorsUnion_t reply_desc_union);
244 -static int mptsas_handle_io_fastpath(mptsas_t *mpt, uint16_t SMID);
245 229 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
246 230 pMpi2ReplyDescriptorsUnion_t reply_desc);
247 231 static void mptsas_handle_address_reply(mptsas_t *mpt,
248 232 pMpi2ReplyDescriptorsUnion_t reply_desc);
249 233 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
250 234 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
251 235 uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
252 236
253 237 static void mptsas_watch(void *arg);
254 238 static void mptsas_watchsubr(mptsas_t *mpt);
255 239 static void mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl);
240 +static void mptsas_kill_target(mptsas_t *mpt, mptsas_target_t *ptgt);
256 241
257 242 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
258 243 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
259 244 uint8_t *data, uint32_t request_size, uint32_t reply_size,
260 245 uint32_t data_size, uint32_t direction, uint8_t *dataout,
261 246 uint32_t dataout_size, short timeout, int mode);
262 247 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
263 248
264 249 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
265 250 uint32_t unique_id);
266 251 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
267 252 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
268 253 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
269 254 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
270 255 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
271 256 uint32_t diag_type);
272 257 static int mptsas_diag_register(mptsas_t *mpt,
273 258 mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
274 259 static int mptsas_diag_unregister(mptsas_t *mpt,
275 260 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
276 261 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
277 262 uint32_t *return_code);
278 263 static int mptsas_diag_read_buffer(mptsas_t *mpt,
279 264 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
280 265 uint32_t *return_code, int ioctl_mode);
281 266 static int mptsas_diag_release(mptsas_t *mpt,
282 267 mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
283 268 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
284 269 uint8_t *diag_action, uint32_t length, uint32_t *return_code,
285 270 int ioctl_mode);
286 271 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
287 272 int mode);
288 273
289 274 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
290 275 int cmdlen, int tgtlen, int statuslen, int kf);
291 276 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
292 277
293 278 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
294 279 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
295 280
296 281 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
297 282 int kmflags);
298 283 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
299 284
300 285 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
↓ open down ↓ |
35 lines elided |
↑ open up ↑ |
301 286 mptsas_cmd_t *cmd);
302 287 static void mptsas_check_task_mgt(mptsas_t *mpt,
303 288 pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
304 289 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
305 290 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
306 291 int *resid);
307 292
308 293 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
309 294 static void mptsas_free_active_slots(mptsas_t *mpt);
310 295 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
311 -static int mptsas_start_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd);
312 296
313 297 static void mptsas_restart_hba(mptsas_t *mpt);
298 +static void mptsas_restart_waitq(mptsas_t *mpt);
314 299
315 300 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
316 301 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
317 -static inline void mptsas_doneq_add0(mptsas_t *mpt, mptsas_cmd_t *cmd);
318 302 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
319 303
320 304 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
321 305 static void mptsas_doneq_empty(mptsas_t *mpt);
322 306 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
323 307
324 308 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
325 309 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
310 +static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
311 +static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
326 312
313 +
327 314 static void mptsas_start_watch_reset_delay();
328 315 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
329 316 static void mptsas_watch_reset_delay(void *arg);
330 317 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
331 318
332 -static int mptsas_outstanding_cmds_n(mptsas_t *mpt);
333 319 /*
334 320 * helper functions
335 321 */
336 322 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
337 323
338 324 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
339 325 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
340 326 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
341 327 int lun);
342 328 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
343 329 int lun);
344 330 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
345 331 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
346 332
347 333 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
348 334 int *lun);
349 335 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
350 336
351 337 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt, int phymask,
352 338 uint8_t phy);
353 339 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask,
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
354 340 uint64_t wwid);
355 341 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask,
356 342 uint64_t wwid);
357 343
358 344 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
359 345 uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
360 346
361 347 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
362 348 uint16_t *handle, mptsas_target_t **pptgt);
363 349 static void mptsas_update_phymask(mptsas_t *mpt);
364 -static inline void mptsas_remove_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd);
365 350
366 -static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
367 - uint32_t *status, uint8_t cmd);
368 351 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
369 352 mptsas_phymask_t *phymask);
370 -static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
371 - mptsas_phymask_t phymask);
372 -static int mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
373 - uint32_t slotstatus);
374 353
375 354
376 355 /*
377 356 * Enumeration / DR functions
378 357 */
379 358 static void mptsas_config_all(dev_info_t *pdip);
380 359 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
381 360 dev_info_t **lundip);
382 361 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
383 362 dev_info_t **lundip);
384 363
385 364 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
386 365 static int mptsas_offline_target(dev_info_t *pdip, char *name);
387 366
388 367 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
389 368 dev_info_t **dip);
390 369
391 370 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
392 371 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
393 372 dev_info_t **dip, mptsas_target_t *ptgt);
394 373
395 374 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
396 375 dev_info_t **dip, mptsas_target_t *ptgt, int lun);
397 376
398 377 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
399 378 char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
400 379 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
401 380 char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
402 381 int lun);
403 382
404 383 static void mptsas_offline_missed_luns(dev_info_t *pdip,
405 384 uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
406 385 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
407 386 mdi_pathinfo_t *rpip, uint_t flags);
408 387
409 388 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
410 389 dev_info_t **smp_dip);
411 390 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
412 391 uint_t flags);
413 392
414 393 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
415 394 int mode, int *rval);
416 395 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
417 396 int mode, int *rval);
418 397 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
419 398 int mode, int *rval);
420 399 static void mptsas_record_event(void *args);
421 400 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
422 401 int mode);
423 402
↓ open down ↓ |
40 lines elided |
↑ open up ↑ |
424 403 static void mptsas_hash_init(mptsas_hash_table_t *hashtab);
425 404 static void mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen);
426 405 static void mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data);
427 406 static void * mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
428 407 mptsas_phymask_t key2);
429 408 static void * mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
430 409 mptsas_phymask_t key2);
431 410 static void * mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos);
432 411
433 412 mptsas_target_t *mptsas_tgt_alloc(mptsas_hash_table_t *, uint16_t, uint64_t,
434 - uint32_t, mptsas_phymask_t, uint8_t, mptsas_t *);
413 + uint32_t, mptsas_phymask_t, uint8_t);
435 414 static mptsas_smp_t *mptsas_smp_alloc(mptsas_hash_table_t *hashtab,
436 415 mptsas_smp_t *data);
437 416 static void mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
438 417 mptsas_phymask_t phymask);
439 418 static void mptsas_tgt_free(mptsas_hash_table_t *, uint64_t, mptsas_phymask_t);
440 419 static void * mptsas_search_by_devhdl(mptsas_hash_table_t *, uint16_t);
441 420 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
442 421 dev_info_t **smp_dip);
443 422
444 423 /*
445 424 * Power management functions
446 425 */
447 426 static int mptsas_get_pci_cap(mptsas_t *mpt);
448 427 static int mptsas_init_pm(mptsas_t *mpt);
449 428
450 429 /*
451 430 * MPT MSI tunable:
452 431 *
453 432 * By default MSI is enabled on all supported platforms.
454 433 */
455 434 boolean_t mptsas_enable_msi = B_TRUE;
456 435 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
457 436
458 437 static int mptsas_register_intrs(mptsas_t *);
459 438 static void mptsas_unregister_intrs(mptsas_t *);
460 439 static int mptsas_add_intrs(mptsas_t *, int);
461 440 static void mptsas_rem_intrs(mptsas_t *);
462 441
463 442 /*
464 443 * FMA Prototypes
465 444 */
466 445 static void mptsas_fm_init(mptsas_t *mpt);
467 446 static void mptsas_fm_fini(mptsas_t *mpt);
468 447 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
469 448
470 449 extern pri_t minclsyspri, maxclsyspri;
471 450
472 451 /*
473 452 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
↓ open down ↓ |
29 lines elided |
↑ open up ↑ |
474 453 * under this device that the paths to a physical device are created when
475 454 * MPxIO is used.
476 455 */
477 456 extern dev_info_t *scsi_vhci_dip;
478 457
479 458 /*
480 459 * Tunable timeout value for Inquiry VPD page 0x83
481 460 * By default the value is 30 seconds.
482 461 */
483 462 int mptsas_inq83_retry_timeout = 30;
463 +/*
464 + * Maximum number of command timeouts (0 - 255) considered acceptable.
465 + */
466 +int mptsas_timeout_threshold = 2;
467 +/*
468 + * Timeouts exceeding threshold within this period are considered excessive.
469 + */
470 +int mptsas_timeout_interval = 30;
484 471
485 472 /*
486 473 * This is used to allocate memory for message frame storage, not for
487 474 * data I/O DMA. All message frames must be stored in the first 4G of
488 475 * physical memory.
489 476 */
490 477 ddi_dma_attr_t mptsas_dma_attrs = {
	/*
	 * High address is capped at 0xffffffff: message frames must live
	 * in the first 4G of physical memory (see comment above).
	 */
491 478 	DMA_ATTR_V0,	/* attribute layout version */
492 479 	0x0ull,		/* address low - should be 0 (longlong) */
493 480 	0xffffffffull,	/* address high - 32-bit max range */
494 481 	0x00ffffffull,	/* count max - max DMA object size */
495 482 	4,		/* allocation alignment requirements */
496 483 	0x78,		/* burstsizes - binary encoded values */
497 484 	1,		/* minxfer - gran. of DMA engine */
498 485 	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
499 486 	0xffffffffull,	/* max segment size (DMA boundary) */
500 487 	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
501 488 	512,		/* granularity - device transfer size */
502 489 	0		/* flags, set to 0 */
503 490 };
504 491
505 492 /*
506 493 * This is used for data I/O DMA memory allocation. (full 64-bit DMA
507 494 * physical addresses are supported.)
508 495 */
509 496 ddi_dma_attr_t mptsas_dma_attrs64 = {
	/*
	 * Unlike mptsas_dma_attrs, data I/O buffers may sit anywhere in
	 * the 64-bit address space, and relaxed ordering is enabled.
	 */
510 497 	DMA_ATTR_V0,	/* attribute layout version */
511 498 	0x0ull,		/* address low - should be 0 (longlong) */
512 499 	0xffffffffffffffffull,	/* address high - 64-bit max */
513 500 	0x00ffffffull,	/* count max - max DMA object size */
514 501 	4,		/* allocation alignment requirements */
515 502 	0x78,		/* burstsizes - binary encoded values */
516 503 	1,		/* minxfer - gran. of DMA engine */
517 504 	0x00ffffffull,	/* maxxfer - gran. of DMA engine */
518 505 	0xffffffffull,	/* max segment size (DMA boundary) */
519 506 	MPTSAS_MAX_DMA_SEGS,	/* scatter/gather list length */
520 507 	512,		/* granularity - device transfer size */
521 508 	DDI_DMA_RELAXED_ORDERING	/* flags, enable relaxed ordering */
522 509 };
523 510
524 511 ddi_device_acc_attr_t mptsas_dev_attr = {
	/* Register access attributes: little-endian, strictly ordered. */
525 512 	DDI_DEVICE_ATTR_V1,	/* attr structure version */
526 513 	DDI_STRUCTURE_LE_ACC,	/* device data is little-endian */
527 514 	DDI_STRICTORDER_ACC,	/* accesses must not be reordered */
528 515 	DDI_DEFAULT_ACC		/* default error-handling semantics */
529 516 };
530 517
	/*
	 * Character device entry points.  open/close are delegated to the
	 * generic SCSI HBA framework; only ioctl is handled by this driver.
	 */
531 518 static struct cb_ops mptsas_cb_ops = {
532 519 	scsi_hba_open,		/* open */
533 520 	scsi_hba_close,		/* close */
534 521 	nodev,			/* strategy */
535 522 	nodev,			/* print */
536 523 	nodev,			/* dump */
537 524 	nodev,			/* read */
538 525 	nodev,			/* write */
539 526 	mptsas_ioctl,		/* ioctl */
540 527 	nodev,			/* devmap */
541 528 	nodev,			/* mmap */
542 529 	nodev,			/* segmap */
543 530 	nochpoll,		/* chpoll */
544 531 	ddi_prop_op,		/* cb_prop_op */
545 532 	NULL,			/* streamtab */
546 533 	D_MP,			/* cb_flag */
547 534 	CB_REV,			/* rev */
548 535 	nodev,			/* aread */
549 536 	nodev			/* awrite */
550 537 };
551 538
	/*
	 * Driver operations vector.  reset is only provided on sparc;
	 * quiesce (for fast reboot) is only needed on non-sparc, where
	 * sparc instead reports ddi_quiesce_not_needed.
	 */
552 539 static struct dev_ops mptsas_ops = {
553 540 	DEVO_REV,		/* devo_rev, */
554 541 	0,			/* refcnt */
555 542 	ddi_no_info,		/* info */
556 543 	nulldev,		/* identify */
557 544 	nulldev,		/* probe */
558 545 	mptsas_attach,		/* attach */
559 546 	mptsas_detach,		/* detach */
560 547 #ifdef	__sparc
561 548 	mptsas_reset,
562 549 #else
563 550 	nodev,			/* reset */
564 551 #endif	/* __sparc */
565 552 	&mptsas_cb_ops,		/* driver operations */
566 553 	NULL,			/* bus operations */
567 554 	mptsas_power,		/* power management */
568 555 #ifdef	__sparc
569 556 	ddi_quiesce_not_needed
570 557 #else
571 558 	mptsas_quiesce		/* quiesce */
572 559 #endif	/* __sparc */
573 560 };
574 561
575 562
576 563 #define MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
577 564
578 565 static struct modldrv modldrv = {
579 566 &mod_driverops, /* Type of module. This one is a driver */
580 567 MPTSAS_MOD_STRING, /* Name of the module. */
581 568 &mptsas_ops, /* driver ops */
582 569 };
583 570
584 571 static struct modlinkage modlinkage = {
585 572 MODREV_1, &modldrv, NULL
586 573 };
587 574 #define TARGET_PROP "target"
588 575 #define LUN_PROP "lun"
589 576 #define LUN64_PROP "lun64"
590 577 #define SAS_PROP "sas-mpt"
591 578 #define MDI_GUID "wwn"
592 579 #define NDI_GUID "guid"
593 580 #define MPTSAS_DEV_GONE "mptsas_dev_gone"
594 581
595 582 /*
596 583 * Local static data
597 584 */
598 585 #if defined(MPTSAS_DEBUG)
599 586 uint32_t mptsas_debug_flags = 0;
600 587 #endif /* defined(MPTSAS_DEBUG) */
601 588 uint32_t mptsas_debug_resets = 0;
602 589
603 590 static kmutex_t mptsas_global_mutex;
604 591 static void *mptsas_state; /* soft state ptr */
605 592 static krwlock_t mptsas_global_rwlock;
606 593
607 594 static kmutex_t mptsas_log_mutex;
608 595 static char mptsas_log_buf[256];
609 596 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
610 597
611 598 static mptsas_t *mptsas_head, *mptsas_tail;
612 599 static clock_t mptsas_scsi_watchdog_tick;
613 600 static clock_t mptsas_tick;
614 601 static timeout_id_t mptsas_reset_watch;
615 602 static timeout_id_t mptsas_timeout_id;
616 603 static int mptsas_timeouts_enabled = 0;
617 604 /*
618 605 * warlock directives
619 606 */
620 607 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
621 608 mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
622 609 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
623 610 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
624 611 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
625 612 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
626 613
627 614 /*
628 615 * SM - HBA statics
629 616 */
630 617 char *mptsas_driver_rev = MPTSAS_MOD_STRING;
631 618
632 619 #ifdef MPTSAS_DEBUG
633 620 void debug_enter(char *);
634 621 #endif
635 622
636 623 /*
637 624 * Notes:
638 625 * - scsi_hba_init(9F) initializes SCSI HBA modules
639 626 * - must call scsi_hba_fini(9F) if modload() fails
640 627 */
641 628 int
642 629 _init(void)
643 630 {
644 631 	int status;
645 632 	/* CONSTCOND */
646 633 	ASSERT(NO_COMPETING_THREADS);
647 634 
648 635 	NDBG0(("_init"));
649 636 
	/*
	 * Allocate the per-instance soft-state array first; all later
	 * steps depend on it.
	 */
650 637 	status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
651 638 	    MPTSAS_INITIAL_SOFT_SPACE);
652 639 	if (status != 0) {
653 640 		return (status);
654 641 	}
655 642 
	/* Register with the SCSI HBA framework before installing the module. */
656 643 	if ((status = scsi_hba_init(&modlinkage)) != 0) {
657 644 		ddi_soft_state_fini(&mptsas_state);
658 645 		return (status);
659 646 	}
660 647 
661 648 	mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
662 649 	rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
663 650 	mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
664 651 
	/*
	 * On mod_install() failure everything set up above is unwound in
	 * reverse order, mirroring the _fini() teardown path.
	 */
665 652 	if ((status = mod_install(&modlinkage)) != 0) {
666 653 		mutex_destroy(&mptsas_log_mutex);
667 654 		rw_destroy(&mptsas_global_rwlock);
668 655 		mutex_destroy(&mptsas_global_mutex);
669 656 		ddi_soft_state_fini(&mptsas_state);
670 657 		scsi_hba_fini(&modlinkage);
671 658 	}
672 659 
673 660 	return (status);
674 661 }
675 662
676 663 /*
677 664 * Notes:
678 665 * - scsi_hba_fini(9F) uninitializes SCSI HBA modules
679 666 */
680 667 int
681 668 _fini(void)
682 669 {
683 670 	int status;
684 671 	/* CONSTCOND */
685 672 	ASSERT(NO_COMPETING_THREADS);
686 673 
687 674 	NDBG0(("_fini"));
688 675 
	/*
	 * Tear down global state only if mod_remove() succeeds; if the
	 * module is still busy, everything must remain intact.
	 */
689 676 	if ((status = mod_remove(&modlinkage)) == 0) {
690 677 		ddi_soft_state_fini(&mptsas_state);
691 678 		scsi_hba_fini(&modlinkage);
692 679 		mutex_destroy(&mptsas_global_mutex);
693 680 		rw_destroy(&mptsas_global_rwlock);
694 681 		mutex_destroy(&mptsas_log_mutex);
695 682 	}
696 683 	return (status);
697 684 }
698 685
699 686 /*
700 687 * The loadable-module _info(9E) entry point
701 688 */
702 689 int
703 690 _info(struct modinfo *modinfop)
704 691 {
705 692 	/* CONSTCOND */
706 693 	ASSERT(NO_COMPETING_THREADS);
707 694 	NDBG0(("mptsas _info"));
708 695 
	/* Delegate module-information reporting to the framework. */
709 696 	return (mod_info(&modlinkage, modinfop));
710 697 }
711 698
712 699
713 700 static int
714 701 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
715 702 {
716 703 dev_info_t *pdip;
717 704 mptsas_t *mpt;
718 705 scsi_hba_tran_t *hba_tran;
719 706 char *iport = NULL;
720 707 char phymask[MPTSAS_MAX_PHYS];
721 708 mptsas_phymask_t phy_mask = 0;
722 709 int dynamic_port = 0;
723 710 uint32_t page_address;
724 711 char initiator_wwnstr[MPTSAS_WWN_STRLEN];
725 712 int rval = DDI_FAILURE;
726 713 int i = 0;
727 714 uint8_t numphys = 0;
728 715 uint8_t phy_id;
729 716 uint8_t phy_port = 0;
730 717 uint16_t attached_devhdl = 0;
731 718 uint32_t dev_info;
732 719 uint64_t attached_sas_wwn;
733 720 uint16_t dev_hdl;
734 721 uint16_t pdev_hdl;
735 722 uint16_t bay_num, enclosure;
736 723 char attached_wwnstr[MPTSAS_WWN_STRLEN];
737 724
738 725 /* CONSTCOND */
739 726 ASSERT(NO_COMPETING_THREADS);
740 727
741 728 switch (cmd) {
742 729 case DDI_ATTACH:
743 730 break;
744 731
745 732 case DDI_RESUME:
746 733 /*
747 734 * If this a scsi-iport node, nothing to do here.
748 735 */
749 736 return (DDI_SUCCESS);
750 737
751 738 default:
752 739 return (DDI_FAILURE);
753 740 }
754 741
755 742 pdip = ddi_get_parent(dip);
756 743
757 744 if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
758 745 NULL) {
759 746 cmn_err(CE_WARN, "Failed attach iport because fail to "
760 747 "get tran vector for the HBA node");
761 748 return (DDI_FAILURE);
762 749 }
763 750
764 751 mpt = TRAN2MPT(hba_tran);
765 752 ASSERT(mpt != NULL);
766 753 if (mpt == NULL)
767 754 return (DDI_FAILURE);
768 755
769 756 if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
770 757 NULL) {
771 758 mptsas_log(mpt, CE_WARN, "Failed attach iport because fail to "
772 759 "get tran vector for the iport node");
773 760 return (DDI_FAILURE);
774 761 }
775 762
776 763 /*
777 764 * Overwrite parent's tran_hba_private to iport's tran vector
778 765 */
779 766 hba_tran->tran_hba_private = mpt;
780 767
781 768 ddi_report_dev(dip);
782 769
783 770 /*
784 771 * Get SAS address for initiator port according dev_handle
785 772 */
786 773 iport = ddi_get_name_addr(dip);
787 774 if (iport && strncmp(iport, "v0", 2) == 0) {
788 775 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
789 776 MPTSAS_VIRTUAL_PORT, 1) !=
790 777 DDI_PROP_SUCCESS) {
791 778 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
792 779 MPTSAS_VIRTUAL_PORT);
793 780 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
794 781 "prop update failed");
795 782 return (DDI_FAILURE);
796 783 }
797 784 return (DDI_SUCCESS);
798 785 }
799 786
800 787 mutex_enter(&mpt->m_mutex);
801 788 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
802 789 bzero(phymask, sizeof (phymask));
803 790 (void) sprintf(phymask,
804 791 "%x", mpt->m_phy_info[i].phy_mask);
805 792 if (strcmp(phymask, iport) == 0) {
806 793 break;
807 794 }
808 795 }
809 796
810 797 if (i == MPTSAS_MAX_PHYS) {
811 798 mptsas_log(mpt, CE_WARN, "Failed attach port %s because port"
812 799 "seems not exist", iport);
813 800 mutex_exit(&mpt->m_mutex);
814 801 return (DDI_FAILURE);
815 802 }
816 803
817 804 phy_mask = mpt->m_phy_info[i].phy_mask;
818 805
819 806 if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
820 807 dynamic_port = 1;
821 808 else
822 809 dynamic_port = 0;
823 810
824 811 /*
825 812 * Update PHY info for smhba
826 813 */
827 814 if (mptsas_smhba_phy_init(mpt)) {
828 815 mutex_exit(&mpt->m_mutex);
829 816 mptsas_log(mpt, CE_WARN, "mptsas phy update "
830 817 "failed");
831 818 return (DDI_FAILURE);
832 819 }
833 820
834 821 mutex_exit(&mpt->m_mutex);
835 822
836 823 numphys = 0;
837 824 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
838 825 if ((phy_mask >> i) & 0x01) {
839 826 numphys++;
840 827 }
841 828 }
842 829
843 830 bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
844 831 (void) sprintf(initiator_wwnstr, "w%016"PRIx64,
845 832 mpt->un.m_base_wwid);
846 833
847 834 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
848 835 SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
849 836 DDI_PROP_SUCCESS) {
850 837 (void) ddi_prop_remove(DDI_DEV_T_NONE,
851 838 dip, SCSI_ADDR_PROP_INITIATOR_PORT);
852 839 mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
853 840 "prop update failed");
854 841 return (DDI_FAILURE);
855 842 }
856 843 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
857 844 MPTSAS_NUM_PHYS, numphys) !=
858 845 DDI_PROP_SUCCESS) {
859 846 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
860 847 return (DDI_FAILURE);
861 848 }
862 849
863 850 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
864 851 "phymask", phy_mask) !=
865 852 DDI_PROP_SUCCESS) {
866 853 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
867 854 mptsas_log(mpt, CE_WARN, "mptsas phy mask "
868 855 "prop update failed");
869 856 return (DDI_FAILURE);
870 857 }
871 858
872 859 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
873 860 "dynamic-port", dynamic_port) !=
874 861 DDI_PROP_SUCCESS) {
875 862 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
876 863 mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
877 864 "prop update failed");
878 865 return (DDI_FAILURE);
879 866 }
880 867 if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
881 868 MPTSAS_VIRTUAL_PORT, 0) !=
882 869 DDI_PROP_SUCCESS) {
883 870 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
884 871 MPTSAS_VIRTUAL_PORT);
885 872 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
886 873 "prop update failed");
887 874 return (DDI_FAILURE);
888 875 }
889 876 mptsas_smhba_set_phy_props(mpt,
890 877 iport, dip, numphys, &attached_devhdl);
891 878
892 879 mutex_enter(&mpt->m_mutex);
893 880 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
894 881 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
895 882 rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
896 883 &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
897 884 &pdev_hdl, &bay_num, &enclosure);
898 885 if (rval != DDI_SUCCESS) {
899 886 mptsas_log(mpt, CE_WARN,
900 887 "Failed to get device page0 for handle:%d",
901 888 attached_devhdl);
902 889 mutex_exit(&mpt->m_mutex);
903 890 return (DDI_FAILURE);
904 891 }
905 892
906 893 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
907 894 bzero(phymask, sizeof (phymask));
908 895 (void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
909 896 if (strcmp(phymask, iport) == 0) {
910 897 (void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
911 898 "%x",
912 899 mpt->m_phy_info[i].phy_mask);
913 900 }
914 901 }
915 902 mutex_exit(&mpt->m_mutex);
916 903
917 904 bzero(attached_wwnstr, sizeof (attached_wwnstr));
918 905 (void) sprintf(attached_wwnstr, "w%016"PRIx64,
919 906 attached_sas_wwn);
920 907 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
921 908 SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
922 909 DDI_PROP_SUCCESS) {
923 910 (void) ddi_prop_remove(DDI_DEV_T_NONE,
924 911 dip, SCSI_ADDR_PROP_ATTACHED_PORT);
925 912 return (DDI_FAILURE);
926 913 }
927 914
928 915 /* Create kstats for each phy on this iport */
929 916
930 917 mptsas_create_phy_stats(mpt, iport, dip);
931 918
932 919 /*
933 920 * register sas hba iport with mdi (MPxIO/vhci)
934 921 */
935 922 if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
936 923 dip, 0) == MDI_SUCCESS) {
937 924 mpt->m_mpxio_enable = TRUE;
938 925 }
939 926 return (DDI_SUCCESS);
940 927 }
941 928
942 929 /*
943 930 * Notes:
944 931 * Set up all device state and allocate data structures,
945 932 * mutexes, condition variables, etc. for device operation.
946 933 * Add interrupts needed.
947 934 * Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
948 935 */
949 936 static int
950 937 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
951 938 {
952 939 mptsas_t *mpt = NULL;
953 940 int instance, i, j;
954 941 int doneq_thread_num;
955 942 char intr_added = 0;
956 943 char map_setup = 0;
957 944 char config_setup = 0;
958 945 char hba_attach_setup = 0;
959 946 char smp_attach_setup = 0;
960 947 char mutex_init_done = 0;
961 948 char event_taskq_create = 0;
962 949 char dr_taskq_create = 0;
963 950 char doneq_thread_create = 0;
964 951 scsi_hba_tran_t *hba_tran;
965 952 uint_t mem_bar = MEM_SPACE;
966 953 int rval = DDI_FAILURE;
967 954
968 955 /* CONSTCOND */
969 956 ASSERT(NO_COMPETING_THREADS);
970 957
971 958 if (scsi_hba_iport_unit_address(dip)) {
972 959 return (mptsas_iport_attach(dip, cmd));
973 960 }
974 961
975 962 switch (cmd) {
976 963 case DDI_ATTACH:
977 964 break;
978 965
979 966 case DDI_RESUME:
980 967 if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
981 968 return (DDI_FAILURE);
982 969
983 970 mpt = TRAN2MPT(hba_tran);
984 971
985 972 if (!mpt) {
986 973 return (DDI_FAILURE);
987 974 }
988 975
989 976 /*
990 977 * Reset hardware and softc to "no outstanding commands"
991 978 * Note that a check condition can result on first command
992 979 * to a target.
993 980 */
994 981 mutex_enter(&mpt->m_mutex);
995 982
996 983 /*
997 984 * raise power.
998 985 */
999 986 if (mpt->m_options & MPTSAS_OPT_PM) {
1000 987 mutex_exit(&mpt->m_mutex);
1001 988 (void) pm_busy_component(dip, 0);
1002 989 rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
1003 990 if (rval == DDI_SUCCESS) {
1004 991 mutex_enter(&mpt->m_mutex);
1005 992 } else {
1006 993 /*
1007 994 * The pm_raise_power() call above failed,
1008 995 * and that can only occur if we were unable
1009 996 * to reset the hardware. This is probably
1010 997 * due to unhealty hardware, and because
1011 998 * important filesystems(such as the root
1012 999 * filesystem) could be on the attached disks,
1013 1000 * it would not be a good idea to continue,
1014 1001 * as we won't be entirely certain we are
1015 1002 * writing correct data. So we panic() here
1016 1003 * to not only prevent possible data corruption,
1017 1004 * but to give developers or end users a hope
1018 1005 * of identifying and correcting any problems.
1019 1006 */
1020 1007 fm_panic("mptsas could not reset hardware "
1021 1008 "during resume");
1022 1009 }
1023 1010 }
1024 1011
1025 1012 mpt->m_suspended = 0;
1026 1013
1027 1014 /*
1028 1015 * Reinitialize ioc
1029 1016 */
1030 1017 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1031 1018 if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1032 1019 mutex_exit(&mpt->m_mutex);
1033 1020 if (mpt->m_options & MPTSAS_OPT_PM) {
1034 1021 (void) pm_idle_component(dip, 0);
1035 1022 }
1036 1023 fm_panic("mptsas init chip fail during resume");
1037 1024 }
1038 1025 /*
1039 1026 * mptsas_update_driver_data needs interrupts so enable them
1040 1027 * first.
1041 1028 */
1042 1029 MPTSAS_ENABLE_INTR(mpt);
1043 1030 mptsas_update_driver_data(mpt);
1044 1031
1045 1032 /* start requests, if possible */
1046 1033 mptsas_restart_hba(mpt);
1047 1034
1048 1035 mutex_exit(&mpt->m_mutex);
1049 1036
1050 1037 /*
1051 1038 * Restart watch thread
1052 1039 */
1053 1040 mutex_enter(&mptsas_global_mutex);
1054 1041 if (mptsas_timeout_id == 0) {
1055 1042 mptsas_timeout_id = timeout(mptsas_watch, NULL,
1056 1043 mptsas_tick);
1057 1044 mptsas_timeouts_enabled = 1;
1058 1045 }
1059 1046 mutex_exit(&mptsas_global_mutex);
1060 1047
1061 1048 /* report idle status to pm framework */
1062 1049 if (mpt->m_options & MPTSAS_OPT_PM) {
1063 1050 (void) pm_idle_component(dip, 0);
1064 1051 }
1065 1052
1066 1053 return (DDI_SUCCESS);
1067 1054
1068 1055 default:
1069 1056 return (DDI_FAILURE);
1070 1057
1071 1058 }
1072 1059
1073 1060 instance = ddi_get_instance(dip);
1074 1061
1075 1062 /*
1076 1063 * Allocate softc information.
1077 1064 */
1078 1065 if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1079 1066 mptsas_log(NULL, CE_WARN,
1080 1067 "mptsas%d: cannot allocate soft state", instance);
1081 1068 goto fail;
1082 1069 }
1083 1070
1084 1071 mpt = ddi_get_soft_state(mptsas_state, instance);
1085 1072
1086 1073 if (mpt == NULL) {
1087 1074 mptsas_log(NULL, CE_WARN,
1088 1075 "mptsas%d: cannot get soft state", instance);
1089 1076 goto fail;
1090 1077 }
1091 1078
1092 1079 /* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1093 1080 scsi_size_clean(dip);
1094 1081
1095 1082 mpt->m_dip = dip;
1096 1083 mpt->m_instance = instance;
1097 1084
1098 1085 /* Make a per-instance copy of the structures */
1099 1086 mpt->m_io_dma_attr = mptsas_dma_attrs64;
1100 1087 mpt->m_msg_dma_attr = mptsas_dma_attrs;
1101 1088 mpt->m_reg_acc_attr = mptsas_dev_attr;
1102 1089 mpt->m_dev_acc_attr = mptsas_dev_attr;
1103 1090
1104 1091 /*
1105 1092 * Initialize FMA
1106 1093 */
1107 1094 mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1108 1095 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1109 1096 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1110 1097 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1111 1098
1112 1099 mptsas_fm_init(mpt);
1113 1100
1114 1101 if (mptsas_alloc_handshake_msg(mpt,
1115 1102 sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1116 1103 mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1117 1104 goto fail;
1118 1105 }
1119 1106
1120 1107 /*
1121 1108 * Setup configuration space
1122 1109 */
1123 1110 if (mptsas_config_space_init(mpt) == FALSE) {
1124 1111 mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1125 1112 goto fail;
1126 1113 }
1127 1114 config_setup++;
1128 1115
1129 1116 if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1130 1117 0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1131 1118 mptsas_log(mpt, CE_WARN, "map setup failed");
1132 1119 goto fail;
1133 1120 }
1134 1121 map_setup++;
1135 1122
1136 1123 /*
1137 1124 * A taskq is created for dealing with the event handler
1138 1125 */
1139 1126 if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1140 1127 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1141 1128 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1142 1129 goto fail;
1143 1130 }
1144 1131 event_taskq_create++;
1145 1132
1146 1133 /*
1147 1134 * A taskq is created for dealing with dr events
1148 1135 */
1149 1136 if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1150 1137 "mptsas_dr_taskq",
1151 1138 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1152 1139 mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1153 1140 "failed");
1154 1141 goto fail;
1155 1142 }
1156 1143 dr_taskq_create++;
1157 1144
1158 1145 mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1159 1146 0, "mptsas_doneq_thread_threshold_prop", 10);
1160 1147 mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1161 1148 0, "mptsas_doneq_length_threshold_prop", 8);
1162 1149 mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1163 1150 0, "mptsas_doneq_thread_n_prop", 8);
1164 1151
1165 1152 if (mpt->m_doneq_thread_n) {
1166 1153 cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1167 1154 mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1168 1155
1169 1156 mutex_enter(&mpt->m_doneq_mutex);
1170 1157 mpt->m_doneq_thread_id =
1171 1158 kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1172 1159 * mpt->m_doneq_thread_n, KM_SLEEP);
1173 1160
1174 1161 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1175 1162 cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1176 1163 CV_DRIVER, NULL);
1177 1164 mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1178 1165 MUTEX_DRIVER, NULL);
1179 1166 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1180 1167 mpt->m_doneq_thread_id[j].flag |=
1181 1168 MPTSAS_DONEQ_THREAD_ACTIVE;
1182 1169 mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1183 1170 mpt->m_doneq_thread_id[j].arg.t = j;
1184 1171 mpt->m_doneq_thread_id[j].threadp =
1185 1172 thread_create(NULL, 0, mptsas_doneq_thread,
1186 1173 &mpt->m_doneq_thread_id[j].arg,
1187 1174 0, &p0, TS_RUN, minclsyspri);
1188 1175 mpt->m_doneq_thread_id[j].donetail =
1189 1176 &mpt->m_doneq_thread_id[j].doneq;
↓ open down ↓ |
696 lines elided |
↑ open up ↑ |
1190 1177 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1191 1178 }
1192 1179 mutex_exit(&mpt->m_doneq_mutex);
1193 1180 doneq_thread_create++;
1194 1181 }
1195 1182
1196 1183 /* Initialize mutex used in interrupt handler */
1197 1184 mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1198 1185 DDI_INTR_PRI(mpt->m_intr_pri));
1199 1186 mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1200 - mutex_init(&mpt->m_intr_mutex, NULL, MUTEX_DRIVER,
1187 + mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1201 1188 DDI_INTR_PRI(mpt->m_intr_pri));
1202 1189 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1203 1190 mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1204 1191 NULL, MUTEX_DRIVER,
1205 1192 DDI_INTR_PRI(mpt->m_intr_pri));
1206 1193 }
1207 1194
1208 1195 cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1209 1196 cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1210 1197 cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1211 1198 cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1212 1199 cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1213 1200 mutex_init_done++;
1214 1201
1215 1202 /*
1216 1203 * Disable hardware interrupt since we're not ready to
1217 1204 * handle it yet.
1218 1205 */
1219 1206 MPTSAS_DISABLE_INTR(mpt);
1220 1207 if (mptsas_register_intrs(mpt) == FALSE)
1221 1208 goto fail;
1222 1209 intr_added++;
1223 1210
1224 1211 mutex_enter(&mpt->m_mutex);
1225 1212 /*
1226 1213 * Initialize power management component
1227 1214 */
1228 1215 if (mpt->m_options & MPTSAS_OPT_PM) {
1229 1216 if (mptsas_init_pm(mpt)) {
1230 1217 mutex_exit(&mpt->m_mutex);
1231 1218 mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1232 1219 "failed");
1233 1220 goto fail;
1234 1221 }
1235 1222 }
1236 1223
1237 1224 /*
1238 1225 * Initialize chip using Message Unit Reset, if allowed
1239 1226 */
1240 1227 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1241 1228 if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1242 1229 mutex_exit(&mpt->m_mutex);
1243 1230 mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1244 1231 goto fail;
1245 1232 }
1246 1233
1247 1234 /*
1248 1235 * Fill in the phy_info structure and get the base WWID
1249 1236 */
1250 1237 if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1251 1238 mptsas_log(mpt, CE_WARN,
1252 1239 "mptsas_get_manufacture_page5 failed!");
1253 1240 goto fail;
1254 1241 }
1255 1242
1256 1243 if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1257 1244 mptsas_log(mpt, CE_WARN,
1258 1245 "mptsas_get_sas_io_unit_page_hndshk failed!");
1259 1246 goto fail;
1260 1247 }
1261 1248
1262 1249 if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1263 1250 mptsas_log(mpt, CE_WARN,
1264 1251 "mptsas_get_manufacture_page0 failed!");
1265 1252 goto fail;
1266 1253 }
1267 1254
1268 1255 mutex_exit(&mpt->m_mutex);
1269 1256
1270 1257 /*
1271 1258 * Register the iport for multiple port HBA
1272 1259 */
1273 1260 mptsas_iport_register(mpt);
1274 1261
1275 1262 /*
1276 1263 * initialize SCSI HBA transport structure
1277 1264 */
1278 1265 if (mptsas_hba_setup(mpt) == FALSE)
1279 1266 goto fail;
1280 1267 hba_attach_setup++;
1281 1268
1282 1269 if (mptsas_smp_setup(mpt) == FALSE)
1283 1270 goto fail;
1284 1271 smp_attach_setup++;
1285 1272
1286 1273 if (mptsas_cache_create(mpt) == FALSE)
1287 1274 goto fail;
1288 1275
1289 1276 mpt->m_scsi_reset_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1290 1277 dip, 0, "scsi-reset-delay", SCSI_DEFAULT_RESET_DELAY);
1291 1278 if (mpt->m_scsi_reset_delay == 0) {
1292 1279 mptsas_log(mpt, CE_NOTE,
↓ open down ↓ |
82 lines elided |
↑ open up ↑ |
1293 1280 "scsi_reset_delay of 0 is not recommended,"
1294 1281 " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1295 1282 mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1296 1283 }
1297 1284
1298 1285 /*
1299 1286 * Initialize the wait and done FIFO queue
1300 1287 */
1301 1288 mpt->m_donetail = &mpt->m_doneq;
1302 1289 mpt->m_waitqtail = &mpt->m_waitq;
1290 + mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1291 + mpt->m_tx_draining = 0;
1303 1292
1304 1293 /*
1305 1294 * ioc cmd queue initialize
1306 1295 */
1307 1296 mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1308 1297 mpt->m_dev_handle = 0xFFFF;
1309 1298
1310 1299 MPTSAS_ENABLE_INTR(mpt);
1311 1300
1312 1301 /*
1313 1302 * enable event notification
1314 1303 */
1315 1304 mutex_enter(&mpt->m_mutex);
1316 1305 if (mptsas_ioc_enable_event_notification(mpt)) {
1317 1306 mutex_exit(&mpt->m_mutex);
1318 1307 goto fail;
1319 1308 }
1320 1309 mutex_exit(&mpt->m_mutex);
1321 1310
1322 1311 /*
1323 1312 * Initialize PHY info for smhba
1324 1313 */
1325 1314 if (mptsas_smhba_setup(mpt)) {
1326 1315 mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1327 1316 "failed");
1328 1317 goto fail;
1329 1318 }
1330 1319
1331 1320 /* Check all dma handles allocated in attach */
1332 1321 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1333 1322 != DDI_SUCCESS) ||
1334 1323 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1335 1324 != DDI_SUCCESS) ||
1336 1325 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1337 1326 != DDI_SUCCESS) ||
1338 1327 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1339 1328 != DDI_SUCCESS) ||
1340 1329 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1341 1330 != DDI_SUCCESS)) {
1342 1331 goto fail;
1343 1332 }
1344 1333
1345 1334 /* Check all acc handles allocated in attach */
1346 1335 if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1347 1336 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1348 1337 != DDI_SUCCESS) ||
1349 1338 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1350 1339 != DDI_SUCCESS) ||
1351 1340 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1352 1341 != DDI_SUCCESS) ||
1353 1342 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1354 1343 != DDI_SUCCESS) ||
1355 1344 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1356 1345 != DDI_SUCCESS) ||
1357 1346 (mptsas_check_acc_handle(mpt->m_config_handle)
1358 1347 != DDI_SUCCESS)) {
1359 1348 goto fail;
1360 1349 }
1361 1350
1362 1351 /*
1363 1352 * After this point, we are not going to fail the attach.
1364 1353 */
1365 1354 /*
1366 1355 * used for mptsas_watch
1367 1356 */
1368 1357 mptsas_list_add(mpt);
1369 1358
1370 1359 mutex_enter(&mptsas_global_mutex);
1371 1360 if (mptsas_timeouts_enabled == 0) {
1372 1361 mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1373 1362 dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1374 1363
1375 1364 mptsas_tick = mptsas_scsi_watchdog_tick *
1376 1365 drv_usectohz((clock_t)1000000);
1377 1366
1378 1367 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1379 1368 mptsas_timeouts_enabled = 1;
1380 1369 }
1381 1370 mutex_exit(&mptsas_global_mutex);
1382 1371
1383 1372 /* Print message of HBA present */
1384 1373 ddi_report_dev(dip);
1385 1374
1386 1375 /* report idle status to pm framework */
1387 1376 if (mpt->m_options & MPTSAS_OPT_PM) {
1388 1377 (void) pm_idle_component(dip, 0);
1389 1378 }
1390 1379
1391 1380 return (DDI_SUCCESS);
1392 1381
1393 1382 fail:
1394 1383 mptsas_log(mpt, CE_WARN, "attach failed");
1395 1384 mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1396 1385 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1397 1386 if (mpt) {
1398 1387 mutex_enter(&mptsas_global_mutex);
1399 1388
1400 1389 if (mptsas_timeout_id && (mptsas_head == NULL)) {
1401 1390 timeout_id_t tid = mptsas_timeout_id;
1402 1391 mptsas_timeouts_enabled = 0;
1403 1392 mptsas_timeout_id = 0;
1404 1393 mutex_exit(&mptsas_global_mutex);
1405 1394 (void) untimeout(tid);
1406 1395 mutex_enter(&mptsas_global_mutex);
1407 1396 }
1408 1397 mutex_exit(&mptsas_global_mutex);
1409 1398 /* deallocate in reverse order */
1410 1399 mptsas_cache_destroy(mpt);
1411 1400
1412 1401 if (smp_attach_setup) {
1413 1402 mptsas_smp_teardown(mpt);
1414 1403 }
1415 1404 if (hba_attach_setup) {
1416 1405 mptsas_hba_teardown(mpt);
1417 1406 }
1418 1407
1419 1408 if (mpt->m_active) {
1420 1409 mptsas_hash_uninit(&mpt->m_active->m_smptbl,
1421 1410 sizeof (mptsas_smp_t));
1422 1411 mptsas_hash_uninit(&mpt->m_active->m_tgttbl,
1423 1412 sizeof (mptsas_target_t));
1424 1413 mptsas_free_active_slots(mpt);
1425 1414 }
1426 1415 if (intr_added) {
1427 1416 mptsas_unregister_intrs(mpt);
1428 1417 }
1429 1418
1430 1419 if (doneq_thread_create) {
1431 1420 mutex_enter(&mpt->m_doneq_mutex);
1432 1421 doneq_thread_num = mpt->m_doneq_thread_n;
1433 1422 for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1434 1423 mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1435 1424 mpt->m_doneq_thread_id[j].flag &=
1436 1425 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1437 1426 cv_signal(&mpt->m_doneq_thread_id[j].cv);
1438 1427 mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1439 1428 }
1440 1429 while (mpt->m_doneq_thread_n) {
1441 1430 cv_wait(&mpt->m_doneq_thread_cv,
1442 1431 &mpt->m_doneq_mutex);
1443 1432 }
1444 1433 for (j = 0; j < doneq_thread_num; j++) {
1445 1434 cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1446 1435 mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1447 1436 }
1448 1437 kmem_free(mpt->m_doneq_thread_id,
1449 1438 sizeof (mptsas_doneq_thread_list_t)
1450 1439 * doneq_thread_num);
1451 1440 mutex_exit(&mpt->m_doneq_mutex);
↓ open down ↓ |
139 lines elided |
↑ open up ↑ |
1452 1441 cv_destroy(&mpt->m_doneq_thread_cv);
1453 1442 mutex_destroy(&mpt->m_doneq_mutex);
1454 1443 }
1455 1444 if (event_taskq_create) {
1456 1445 ddi_taskq_destroy(mpt->m_event_taskq);
1457 1446 }
1458 1447 if (dr_taskq_create) {
1459 1448 ddi_taskq_destroy(mpt->m_dr_taskq);
1460 1449 }
1461 1450 if (mutex_init_done) {
1462 - mutex_destroy(&mpt->m_intr_mutex);
1451 + mutex_destroy(&mpt->m_tx_waitq_mutex);
1463 1452 mutex_destroy(&mpt->m_passthru_mutex);
1464 1453 mutex_destroy(&mpt->m_mutex);
1465 1454 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1466 1455 mutex_destroy(
1467 1456 &mpt->m_phy_info[i].smhba_info.phy_mutex);
1468 1457 }
1469 1458 cv_destroy(&mpt->m_cv);
1470 1459 cv_destroy(&mpt->m_passthru_cv);
1471 1460 cv_destroy(&mpt->m_fw_cv);
1472 1461 cv_destroy(&mpt->m_config_cv);
1473 1462 cv_destroy(&mpt->m_fw_diag_cv);
1474 1463 }
1475 1464
1476 1465 if (map_setup) {
1477 1466 mptsas_cfg_fini(mpt);
1478 1467 }
1479 1468 if (config_setup) {
1480 1469 mptsas_config_space_fini(mpt);
1481 1470 }
1482 1471 mptsas_free_handshake_msg(mpt);
1483 1472 mptsas_hba_fini(mpt);
1484 1473
1485 1474 mptsas_fm_fini(mpt);
1486 1475 ddi_soft_state_free(mptsas_state, instance);
1487 1476 ddi_prop_remove_all(dip);
1488 1477 }
1489 1478 return (DDI_FAILURE);
1490 1479 }
1491 1480
/*
 * DDI_SUSPEND handling (called from mptsas_detach()).
 *
 * Quiesces one instance: cancels its per-instance timeouts and, once
 * every instance in mptsas_head is suspended, stops the global watch
 * and reset timers.  If the chip is at full power, HBA interrupts are
 * disabled, Integrated RAID state is synced via a RAID action system
 * shutdown, and the event/DR task queues are drained.
 * Always returns DDI_SUCCESS.
 */
static int
mptsas_suspend(dev_info_t *devi)
{
	mptsas_t	*mpt, *g;
	scsi_hba_tran_t	*tran;

	/* iport nodes carry no per-instance hardware state. */
	if (scsi_hba_iport_unit_address(devi)) {
		return (DDI_SUCCESS);
	}

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	mpt = TRAN2MPT(tran);
	if (!mpt) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&mpt->m_mutex);

	/* Nested suspend: only the first caller does the work. */
	if (mpt->m_suspended++) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Cancel timeout threads for this mpt.  The mutex is dropped
	 * across each untimeout() so an in-flight callback that needs
	 * m_mutex cannot deadlock against us.
	 */
	if (mpt->m_quiesce_timeid) {
		timeout_id_t tid = mpt->m_quiesce_timeid;
		mpt->m_quiesce_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	if (mpt->m_restart_cmd_timeid) {
		timeout_id_t tid = mpt->m_restart_cmd_timeid;
		mpt->m_restart_cmd_timeid = 0;
		mutex_exit(&mpt->m_mutex);
		(void) untimeout(tid);
		mutex_enter(&mpt->m_mutex);
	}

	mutex_exit(&mpt->m_mutex);

	(void) pm_idle_component(mpt->m_dip, 0);

	/*
	 * Cancel watch threads if all mpts suspended
	 */
	rw_enter(&mptsas_global_rwlock, RW_WRITER);
	for (g = mptsas_head; g != NULL; g = g->m_next) {
		if (!g->m_suspended)
			break;
	}
	rw_exit(&mptsas_global_rwlock);

	mutex_enter(&mptsas_global_mutex);
	/* g == NULL means the loop above found no active instance. */
	if (g == NULL) {
		timeout_id_t tid;

		mptsas_timeouts_enabled = 0;
		if (mptsas_timeout_id) {
			tid = mptsas_timeout_id;
			mptsas_timeout_id = 0;
			/* Drop the global mutex across untimeout(). */
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
		if (mptsas_reset_watch) {
			tid = mptsas_reset_watch;
			mptsas_reset_watch = 0;
			mutex_exit(&mptsas_global_mutex);
			(void) untimeout(tid);
			mutex_enter(&mptsas_global_mutex);
		}
	}
	mutex_exit(&mptsas_global_mutex);

	mutex_enter(&mpt->m_mutex);

	/*
	 * If this mpt is not in full power(PM_LEVEL_D0), just return.
	 */
	if ((mpt->m_options & MPTSAS_OPT_PM) &&
	    (mpt->m_power_level != PM_LEVEL_D0)) {
		mutex_exit(&mpt->m_mutex);
		return (DDI_SUCCESS);
	}

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/*
	 * Send RAID action system shutdown to sync IR
	 */
	mptsas_raid_action_system_shutdown(mpt);

	mutex_exit(&mpt->m_mutex);

	/* drain the taskq */
	ddi_taskq_wait(mpt->m_event_taskq);
	ddi_taskq_wait(mpt->m_dr_taskq);

	return (DDI_SUCCESS);
}
1598 1587
1599 1588 #ifdef __sparc
1600 1589 /*ARGSUSED*/
1601 1590 static int
1602 1591 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1603 1592 {
1604 1593 mptsas_t *mpt;
1605 1594 scsi_hba_tran_t *tran;
1606 1595
1607 1596 /*
1608 1597 * If this call is for iport, just return.
1609 1598 */
1610 1599 if (scsi_hba_iport_unit_address(devi))
1611 1600 return (DDI_SUCCESS);
1612 1601
1613 1602 if ((tran = ddi_get_driver_private(devi)) == NULL)
1614 1603 return (DDI_SUCCESS);
1615 1604
1616 1605 if ((mpt = TRAN2MPT(tran)) == NULL)
1617 1606 return (DDI_SUCCESS);
1618 1607
1619 1608 /*
1620 1609 * Send RAID action system shutdown to sync IR. Disable HBA
1621 1610 * interrupts in hardware first.
1622 1611 */
1623 1612 MPTSAS_DISABLE_INTR(mpt);
1624 1613 mptsas_raid_action_system_shutdown(mpt);
1625 1614
1626 1615 return (DDI_SUCCESS);
1627 1616 }
1628 1617 #else /* __sparc */
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not be
 * blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
mptsas_quiesce(dev_info_t *devi)
{
	mptsas_t	*mpt;
	scsi_hba_tran_t *tran;

	/*
	 * If this call is for iport, just return.
	 */
	if (scsi_hba_iport_unit_address(devi))
		return (DDI_SUCCESS);

	if ((tran = ddi_get_driver_private(devi)) == NULL)
		return (DDI_SUCCESS);

	if ((mpt = TRAN2MPT(tran)) == NULL)
		return (DDI_SUCCESS);

	/* Disable HBA interrupts in hardware */
	MPTSAS_DISABLE_INTR(mpt);
	/* Send RAID action system shutdown to sync IR */
	mptsas_raid_action_system_shutdown(mpt);

	return (DDI_SUCCESS);
}
1664 1653 #endif /* __sparc */
1665 1654
1666 1655 /*
1667 1656 * detach(9E). Remove all device allocations and system resources;
1668 1657 * disable device interrupts.
1669 1658 * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1670 1659 */
1671 1660 static int
1672 1661 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1673 1662 {
1674 1663 /* CONSTCOND */
1675 1664 ASSERT(NO_COMPETING_THREADS);
1676 1665 NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1677 1666
1678 1667 switch (cmd) {
1679 1668 case DDI_DETACH:
1680 1669 return (mptsas_do_detach(devi));
1681 1670
1682 1671 case DDI_SUSPEND:
1683 1672 return (mptsas_suspend(devi));
1684 1673
1685 1674 default:
1686 1675 return (DDI_FAILURE);
1687 1676 }
1688 1677 /* NOTREACHED */
1689 1678 }
1690 1679
1691 1680 static int
1692 1681 mptsas_do_detach(dev_info_t *dip)
1693 1682 {
1694 1683 mptsas_t *mpt;
1695 1684 scsi_hba_tran_t *tran;
1696 1685 int circ = 0;
1697 1686 int circ1 = 0;
1698 1687 mdi_pathinfo_t *pip = NULL;
1699 1688 int i;
1700 1689 int doneq_thread_num = 0;
1701 1690
1702 1691 NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));
1703 1692
1704 1693 if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
1705 1694 return (DDI_FAILURE);
1706 1695
1707 1696 mpt = TRAN2MPT(tran);
1708 1697 if (!mpt) {
1709 1698 return (DDI_FAILURE);
1710 1699 }
1711 1700 /*
1712 1701 * Still have pathinfo child, should not detach mpt driver
1713 1702 */
1714 1703 if (scsi_hba_iport_unit_address(dip)) {
1715 1704 if (mpt->m_mpxio_enable) {
1716 1705 /*
1717 1706 * MPxIO enabled for the iport
1718 1707 */
1719 1708 ndi_devi_enter(scsi_vhci_dip, &circ1);
1720 1709 ndi_devi_enter(dip, &circ);
1721 1710 while (pip = mdi_get_next_client_path(dip, NULL)) {
1722 1711 if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
1723 1712 continue;
1724 1713 }
1725 1714 ndi_devi_exit(dip, circ);
1726 1715 ndi_devi_exit(scsi_vhci_dip, circ1);
1727 1716 NDBG12(("detach failed because of "
1728 1717 "outstanding path info"));
1729 1718 return (DDI_FAILURE);
1730 1719 }
1731 1720 ndi_devi_exit(dip, circ);
1732 1721 ndi_devi_exit(scsi_vhci_dip, circ1);
1733 1722 (void) mdi_phci_unregister(dip, 0);
1734 1723 }
1735 1724
1736 1725 ddi_prop_remove_all(dip);
1737 1726
1738 1727 return (DDI_SUCCESS);
1739 1728 }
1740 1729
1741 1730 /* Make sure power level is D0 before accessing registers */
1742 1731 if (mpt->m_options & MPTSAS_OPT_PM) {
1743 1732 (void) pm_busy_component(dip, 0);
1744 1733 if (mpt->m_power_level != PM_LEVEL_D0) {
1745 1734 if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
1746 1735 DDI_SUCCESS) {
1747 1736 mptsas_log(mpt, CE_WARN,
1748 1737 "mptsas%d: Raise power request failed.",
1749 1738 mpt->m_instance);
1750 1739 (void) pm_idle_component(dip, 0);
1751 1740 return (DDI_FAILURE);
1752 1741 }
1753 1742 }
1754 1743 }
1755 1744
1756 1745 /*
1757 1746 * Send RAID action system shutdown to sync IR. After action, send a
1758 1747 * Message Unit Reset. Since after that DMA resource will be freed,
1759 1748 * set ioc to READY state will avoid HBA initiated DMA operation.
1760 1749 */
1761 1750 mutex_enter(&mpt->m_mutex);
1762 1751 MPTSAS_DISABLE_INTR(mpt);
1763 1752 mptsas_raid_action_system_shutdown(mpt);
1764 1753 mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1765 1754 (void) mptsas_ioc_reset(mpt, FALSE);
1766 1755 mutex_exit(&mpt->m_mutex);
1767 1756 mptsas_rem_intrs(mpt);
1768 1757 ddi_taskq_destroy(mpt->m_event_taskq);
1769 1758 ddi_taskq_destroy(mpt->m_dr_taskq);
1770 1759
1771 1760 if (mpt->m_doneq_thread_n) {
1772 1761 mutex_enter(&mpt->m_doneq_mutex);
1773 1762 doneq_thread_num = mpt->m_doneq_thread_n;
1774 1763 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
1775 1764 mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
1776 1765 mpt->m_doneq_thread_id[i].flag &=
1777 1766 (~MPTSAS_DONEQ_THREAD_ACTIVE);
1778 1767 cv_signal(&mpt->m_doneq_thread_id[i].cv);
1779 1768 mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
1780 1769 }
1781 1770 while (mpt->m_doneq_thread_n) {
1782 1771 cv_wait(&mpt->m_doneq_thread_cv,
1783 1772 &mpt->m_doneq_mutex);
1784 1773 }
1785 1774 for (i = 0; i < doneq_thread_num; i++) {
1786 1775 cv_destroy(&mpt->m_doneq_thread_id[i].cv);
1787 1776 mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
1788 1777 }
1789 1778 kmem_free(mpt->m_doneq_thread_id,
1790 1779 sizeof (mptsas_doneq_thread_list_t)
1791 1780 * doneq_thread_num);
1792 1781 mutex_exit(&mpt->m_doneq_mutex);
1793 1782 cv_destroy(&mpt->m_doneq_thread_cv);
1794 1783 mutex_destroy(&mpt->m_doneq_mutex);
1795 1784 }
1796 1785
1797 1786 scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);
1798 1787
1799 1788 mptsas_list_del(mpt);
1800 1789
1801 1790 /*
1802 1791 * Cancel timeout threads for this mpt
1803 1792 */
1804 1793 mutex_enter(&mpt->m_mutex);
1805 1794 if (mpt->m_quiesce_timeid) {
1806 1795 timeout_id_t tid = mpt->m_quiesce_timeid;
1807 1796 mpt->m_quiesce_timeid = 0;
1808 1797 mutex_exit(&mpt->m_mutex);
1809 1798 (void) untimeout(tid);
1810 1799 mutex_enter(&mpt->m_mutex);
1811 1800 }
1812 1801
1813 1802 if (mpt->m_restart_cmd_timeid) {
1814 1803 timeout_id_t tid = mpt->m_restart_cmd_timeid;
1815 1804 mpt->m_restart_cmd_timeid = 0;
1816 1805 mutex_exit(&mpt->m_mutex);
1817 1806 (void) untimeout(tid);
1818 1807 mutex_enter(&mpt->m_mutex);
1819 1808 }
1820 1809
1821 1810 mutex_exit(&mpt->m_mutex);
1822 1811
1823 1812 /*
1824 1813 * last mpt? ... if active, CANCEL watch threads.
1825 1814 */
1826 1815 mutex_enter(&mptsas_global_mutex);
1827 1816 if (mptsas_head == NULL) {
1828 1817 timeout_id_t tid;
1829 1818 /*
1830 1819 * Clear mptsas_timeouts_enabled so that the watch thread
1831 1820 * gets restarted on DDI_ATTACH
1832 1821 */
1833 1822 mptsas_timeouts_enabled = 0;
1834 1823 if (mptsas_timeout_id) {
1835 1824 tid = mptsas_timeout_id;
1836 1825 mptsas_timeout_id = 0;
1837 1826 mutex_exit(&mptsas_global_mutex);
1838 1827 (void) untimeout(tid);
1839 1828 mutex_enter(&mptsas_global_mutex);
1840 1829 }
1841 1830 if (mptsas_reset_watch) {
1842 1831 tid = mptsas_reset_watch;
1843 1832 mptsas_reset_watch = 0;
1844 1833 mutex_exit(&mptsas_global_mutex);
1845 1834 (void) untimeout(tid);
1846 1835 mutex_enter(&mptsas_global_mutex);
1847 1836 }
1848 1837 }
1849 1838 mutex_exit(&mptsas_global_mutex);
1850 1839
1851 1840 /*
1852 1841 * Delete Phy stats
1853 1842 */
1854 1843 mptsas_destroy_phy_stats(mpt);
1855 1844
1856 1845 /*
1857 1846 * Delete m_active (target and SMP tables, active slots).
1858 1847 */
1859 1848 mutex_enter(&mpt->m_mutex);
1860 1849 mptsas_hash_uninit(&mpt->m_active->m_tgttbl, sizeof (mptsas_target_t));
1861 1850 mptsas_hash_uninit(&mpt->m_active->m_smptbl, sizeof (mptsas_smp_t));
1862 1851 mptsas_free_active_slots(mpt);
1863 1852 mutex_exit(&mpt->m_mutex);
1864 1853
1865 1854 /* deallocate everything that was allocated in mptsas_attach */
1866 1855 mptsas_cache_destroy(mpt);
1867 1856
1868 1857 mptsas_hba_fini(mpt);
1869 1858 mptsas_cfg_fini(mpt);
↓ open down ↓ |
397 lines elided |
↑ open up ↑ |
1870 1859
1871 1860 /* Lower the power informing PM Framework */
1872 1861 if (mpt->m_options & MPTSAS_OPT_PM) {
1873 1862 if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
1874 1863 mptsas_log(mpt, CE_WARN,
1875 1864 "!mptsas%d: Lower power request failed "
1876 1865 "during detach, ignoring.",
1877 1866 mpt->m_instance);
1878 1867 }
1879 1868
1880 - mutex_destroy(&mpt->m_intr_mutex);
1869 + mutex_destroy(&mpt->m_tx_waitq_mutex);
1881 1870 mutex_destroy(&mpt->m_passthru_mutex);
1882 1871 mutex_destroy(&mpt->m_mutex);
1883 1872 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1884 1873 mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
1885 1874 }
1886 1875 cv_destroy(&mpt->m_cv);
1887 1876 cv_destroy(&mpt->m_passthru_cv);
1888 1877 cv_destroy(&mpt->m_fw_cv);
1889 1878 cv_destroy(&mpt->m_config_cv);
1890 1879 cv_destroy(&mpt->m_fw_diag_cv);
1891 1880
1892 1881
1893 1882 mptsas_smp_teardown(mpt);
1894 1883 mptsas_hba_teardown(mpt);
1895 1884
1896 1885 mptsas_config_space_fini(mpt);
1897 1886
1898 1887 mptsas_free_handshake_msg(mpt);
1899 1888
1900 1889 mptsas_fm_fini(mpt);
1901 1890 ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
1902 1891 ddi_prop_remove_all(dip);
1903 1892
1904 1893 return (DDI_SUCCESS);
1905 1894 }
1906 1895
1907 1896 static void
1908 1897 mptsas_list_add(mptsas_t *mpt)
1909 1898 {
1910 1899 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1911 1900
1912 1901 if (mptsas_head == NULL) {
1913 1902 mptsas_head = mpt;
1914 1903 } else {
1915 1904 mptsas_tail->m_next = mpt;
1916 1905 }
1917 1906 mptsas_tail = mpt;
1918 1907 rw_exit(&mptsas_global_rwlock);
1919 1908 }
1920 1909
1921 1910 static void
1922 1911 mptsas_list_del(mptsas_t *mpt)
1923 1912 {
1924 1913 mptsas_t *m;
1925 1914 /*
1926 1915 * Remove device instance from the global linked list
1927 1916 */
1928 1917 rw_enter(&mptsas_global_rwlock, RW_WRITER);
1929 1918 if (mptsas_head == mpt) {
1930 1919 m = mptsas_head = mpt->m_next;
1931 1920 } else {
1932 1921 for (m = mptsas_head; m != NULL; m = m->m_next) {
1933 1922 if (m->m_next == mpt) {
1934 1923 m->m_next = mpt->m_next;
1935 1924 break;
1936 1925 }
1937 1926 }
1938 1927 if (m == NULL) {
1939 1928 mptsas_log(mpt, CE_PANIC, "Not in softc list!");
1940 1929 }
1941 1930 }
1942 1931
1943 1932 if (mptsas_tail == mpt) {
1944 1933 mptsas_tail = m;
1945 1934 }
1946 1935 rw_exit(&mptsas_global_rwlock);
1947 1936 }
1948 1937
1949 1938 static int
1950 1939 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
1951 1940 {
1952 1941 ddi_dma_attr_t task_dma_attrs;
1953 1942
1954 1943 task_dma_attrs = mpt->m_msg_dma_attr;
1955 1944 task_dma_attrs.dma_attr_sgllen = 1;
1956 1945 task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
1957 1946
1958 1947 /* allocate Task Management ddi_dma resources */
1959 1948 if (mptsas_dma_addr_create(mpt, task_dma_attrs,
1960 1949 &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
1961 1950 alloc_size, NULL) == FALSE) {
1962 1951 return (DDI_FAILURE);
1963 1952 }
1964 1953 mpt->m_hshk_dma_size = alloc_size;
1965 1954
1966 1955 return (DDI_SUCCESS);
1967 1956 }
1968 1957
1969 1958 static void
1970 1959 mptsas_free_handshake_msg(mptsas_t *mpt)
1971 1960 {
1972 1961 mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
1973 1962 mpt->m_hshk_dma_size = 0;
1974 1963 }
1975 1964
1976 1965 static int
1977 1966 mptsas_hba_setup(mptsas_t *mpt)
1978 1967 {
1979 1968 scsi_hba_tran_t *hba_tran;
1980 1969 int tran_flags;
1981 1970
1982 1971 /* Allocate a transport structure */
1983 1972 hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
1984 1973 SCSI_HBA_CANSLEEP);
1985 1974 ASSERT(mpt->m_tran != NULL);
1986 1975
1987 1976 hba_tran->tran_hba_private = mpt;
1988 1977 hba_tran->tran_tgt_private = NULL;
1989 1978
1990 1979 hba_tran->tran_tgt_init = mptsas_scsi_tgt_init;
1991 1980 hba_tran->tran_tgt_free = mptsas_scsi_tgt_free;
1992 1981
1993 1982 hba_tran->tran_start = mptsas_scsi_start;
1994 1983 hba_tran->tran_reset = mptsas_scsi_reset;
1995 1984 hba_tran->tran_abort = mptsas_scsi_abort;
1996 1985 hba_tran->tran_getcap = mptsas_scsi_getcap;
1997 1986 hba_tran->tran_setcap = mptsas_scsi_setcap;
1998 1987 hba_tran->tran_init_pkt = mptsas_scsi_init_pkt;
1999 1988 hba_tran->tran_destroy_pkt = mptsas_scsi_destroy_pkt;
2000 1989
2001 1990 hba_tran->tran_dmafree = mptsas_scsi_dmafree;
2002 1991 hba_tran->tran_sync_pkt = mptsas_scsi_sync_pkt;
2003 1992 hba_tran->tran_reset_notify = mptsas_scsi_reset_notify;
2004 1993
2005 1994 hba_tran->tran_get_bus_addr = mptsas_get_bus_addr;
2006 1995 hba_tran->tran_get_name = mptsas_get_name;
2007 1996
2008 1997 hba_tran->tran_quiesce = mptsas_scsi_quiesce;
2009 1998 hba_tran->tran_unquiesce = mptsas_scsi_unquiesce;
2010 1999 hba_tran->tran_bus_reset = NULL;
2011 2000
2012 2001 hba_tran->tran_add_eventcall = NULL;
2013 2002 hba_tran->tran_get_eventcookie = NULL;
2014 2003 hba_tran->tran_post_event = NULL;
2015 2004 hba_tran->tran_remove_eventcall = NULL;
2016 2005
2017 2006 hba_tran->tran_bus_config = mptsas_bus_config;
2018 2007
2019 2008 hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
2020 2009
2021 2010 /*
2022 2011 * All children of the HBA are iports. We need the tran to be
2023 2012 * cloned, so we pass the flags to SCSA. SCSI_HBA_TRAN_CLONE will be
2024 2013 * inherited to iport's tran vector.
2025 2014 */
2026 2015 tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);
2027 2016
2028 2017 if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
2029 2018 hba_tran, tran_flags) != DDI_SUCCESS) {
2030 2019 mptsas_log(mpt, CE_WARN, "hba attach setup failed");
2031 2020 scsi_hba_tran_free(hba_tran);
2032 2021 mpt->m_tran = NULL;
2033 2022 return (FALSE);
2034 2023 }
2035 2024 return (TRUE);
2036 2025 }
2037 2026
2038 2027 static void
2039 2028 mptsas_hba_teardown(mptsas_t *mpt)
2040 2029 {
2041 2030 (void) scsi_hba_detach(mpt->m_dip);
2042 2031 if (mpt->m_tran != NULL) {
2043 2032 scsi_hba_tran_free(mpt->m_tran);
2044 2033 mpt->m_tran = NULL;
2045 2034 }
2046 2035 }
2047 2036
2048 2037 static void
2049 2038 mptsas_iport_register(mptsas_t *mpt)
2050 2039 {
2051 2040 int i, j;
2052 2041 mptsas_phymask_t mask = 0x0;
2053 2042 /*
2054 2043 * initial value of mask is 0
2055 2044 */
2056 2045 mutex_enter(&mpt->m_mutex);
2057 2046 for (i = 0; i < mpt->m_num_phys; i++) {
2058 2047 mptsas_phymask_t phy_mask = 0x0;
2059 2048 char phy_mask_name[MPTSAS_MAX_PHYS];
2060 2049 uint8_t current_port;
2061 2050
2062 2051 if (mpt->m_phy_info[i].attached_devhdl == 0)
2063 2052 continue;
2064 2053
2065 2054 bzero(phy_mask_name, sizeof (phy_mask_name));
2066 2055
2067 2056 current_port = mpt->m_phy_info[i].port_num;
2068 2057
2069 2058 if ((mask & (1 << i)) != 0)
2070 2059 continue;
2071 2060
2072 2061 for (j = 0; j < mpt->m_num_phys; j++) {
2073 2062 if (mpt->m_phy_info[j].attached_devhdl &&
2074 2063 (mpt->m_phy_info[j].port_num == current_port)) {
2075 2064 phy_mask |= (1 << j);
2076 2065 }
2077 2066 }
2078 2067 mask = mask | phy_mask;
2079 2068
2080 2069 for (j = 0; j < mpt->m_num_phys; j++) {
2081 2070 if ((phy_mask >> j) & 0x01) {
2082 2071 mpt->m_phy_info[j].phy_mask = phy_mask;
2083 2072 }
2084 2073 }
2085 2074
2086 2075 (void) sprintf(phy_mask_name, "%x", phy_mask);
2087 2076
2088 2077 mutex_exit(&mpt->m_mutex);
2089 2078 /*
2090 2079 * register a iport
2091 2080 */
2092 2081 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
2093 2082 mutex_enter(&mpt->m_mutex);
2094 2083 }
2095 2084 mutex_exit(&mpt->m_mutex);
2096 2085 /*
2097 2086 * register a virtual port for RAID volume always
2098 2087 */
2099 2088 (void) scsi_hba_iport_register(mpt->m_dip, "v0");
2100 2089
2101 2090 }
2102 2091
2103 2092 static int
2104 2093 mptsas_smp_setup(mptsas_t *mpt)
2105 2094 {
2106 2095 mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
2107 2096 ASSERT(mpt->m_smptran != NULL);
2108 2097 mpt->m_smptran->smp_tran_hba_private = mpt;
2109 2098 mpt->m_smptran->smp_tran_start = mptsas_smp_start;
2110 2099 if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
2111 2100 mptsas_log(mpt, CE_WARN, "smp attach setup failed");
2112 2101 smp_hba_tran_free(mpt->m_smptran);
2113 2102 mpt->m_smptran = NULL;
2114 2103 return (FALSE);
2115 2104 }
2116 2105 /*
2117 2106 * Initialize smp hash table
2118 2107 */
2119 2108 mptsas_hash_init(&mpt->m_active->m_smptbl);
2120 2109 mpt->m_smp_devhdl = 0xFFFF;
2121 2110
2122 2111 return (TRUE);
2123 2112 }
2124 2113
2125 2114 static void
2126 2115 mptsas_smp_teardown(mptsas_t *mpt)
2127 2116 {
2128 2117 (void) smp_hba_detach(mpt->m_dip);
2129 2118 if (mpt->m_smptran != NULL) {
2130 2119 smp_hba_tran_free(mpt->m_smptran);
2131 2120 mpt->m_smptran = NULL;
2132 2121 }
2133 2122 mpt->m_smp_devhdl = 0;
2134 2123 }
2135 2124
2136 2125 static int
2137 2126 mptsas_cache_create(mptsas_t *mpt)
2138 2127 {
2139 2128 int instance = mpt->m_instance;
2140 2129 char buf[64];
2141 2130
2142 2131 /*
2143 2132 * create kmem cache for packets
2144 2133 */
2145 2134 (void) sprintf(buf, "mptsas%d_cache", instance);
2146 2135 mpt->m_kmem_cache = kmem_cache_create(buf,
2147 2136 sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2148 2137 mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2149 2138 NULL, (void *)mpt, NULL, 0);
2150 2139
2151 2140 if (mpt->m_kmem_cache == NULL) {
2152 2141 mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2153 2142 return (FALSE);
2154 2143 }
2155 2144
2156 2145 /*
2157 2146 * create kmem cache for extra SGL frames if SGL cannot
2158 2147 * be accommodated into the main request frame.
2159 2148 */
2160 2149 (void) sprintf(buf, "mptsas%d_cache_frames", instance);
2161 2150 mpt->m_cache_frames = kmem_cache_create(buf,
2162 2151 sizeof (mptsas_cache_frames_t), 8,
2163 2152 mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2164 2153 NULL, (void *)mpt, NULL, 0);
2165 2154
2166 2155 if (mpt->m_cache_frames == NULL) {
2167 2156 mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2168 2157 return (FALSE);
2169 2158 }
2170 2159
2171 2160 return (TRUE);
2172 2161 }
2173 2162
2174 2163 static void
2175 2164 mptsas_cache_destroy(mptsas_t *mpt)
2176 2165 {
2177 2166 /* deallocate in reverse order */
2178 2167 if (mpt->m_cache_frames) {
2179 2168 kmem_cache_destroy(mpt->m_cache_frames);
2180 2169 mpt->m_cache_frames = NULL;
2181 2170 }
2182 2171 if (mpt->m_kmem_cache) {
2183 2172 kmem_cache_destroy(mpt->m_kmem_cache);
2184 2173 mpt->m_kmem_cache = NULL;
2185 2174 }
2186 2175 }
2187 2176
2188 2177 static int
2189 2178 mptsas_power(dev_info_t *dip, int component, int level)
2190 2179 {
2191 2180 #ifndef __lock_lint
2192 2181 _NOTE(ARGUNUSED(component))
2193 2182 #endif
2194 2183 mptsas_t *mpt;
2195 2184 int rval = DDI_SUCCESS;
2196 2185 int polls = 0;
2197 2186 uint32_t ioc_status;
2198 2187
2199 2188 if (scsi_hba_iport_unit_address(dip) != 0)
2200 2189 return (DDI_SUCCESS);
2201 2190
2202 2191 mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
2203 2192 if (mpt == NULL) {
2204 2193 return (DDI_FAILURE);
2205 2194 }
2206 2195
2207 2196 mutex_enter(&mpt->m_mutex);
2208 2197
2209 2198 /*
2210 2199 * If the device is busy, don't lower its power level
2211 2200 */
2212 2201 if (mpt->m_busy && (mpt->m_power_level > level)) {
2213 2202 mutex_exit(&mpt->m_mutex);
2214 2203 return (DDI_FAILURE);
2215 2204 }
2216 2205 switch (level) {
2217 2206 case PM_LEVEL_D0:
2218 2207 NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
2219 2208 MPTSAS_POWER_ON(mpt);
2220 2209 /*
2221 2210 * Wait up to 30 seconds for IOC to come out of reset.
2222 2211 */
2223 2212 while (((ioc_status = ddi_get32(mpt->m_datap,
2224 2213 &mpt->m_reg->Doorbell)) &
2225 2214 MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
2226 2215 if (polls++ > 3000) {
2227 2216 break;
2228 2217 }
2229 2218 delay(drv_usectohz(10000));
2230 2219 }
2231 2220 /*
2232 2221 * If IOC is not in operational state, try to hard reset it.
2233 2222 */
↓ open down ↓ |
343 lines elided |
↑ open up ↑ |
2234 2223 if ((ioc_status & MPI2_IOC_STATE_MASK) !=
2235 2224 MPI2_IOC_STATE_OPERATIONAL) {
2236 2225 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
2237 2226 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
2238 2227 mptsas_log(mpt, CE_WARN,
2239 2228 "mptsas_power: hard reset failed");
2240 2229 mutex_exit(&mpt->m_mutex);
2241 2230 return (DDI_FAILURE);
2242 2231 }
2243 2232 }
2244 - mutex_enter(&mpt->m_intr_mutex);
2245 2233 mpt->m_power_level = PM_LEVEL_D0;
2246 - mutex_exit(&mpt->m_intr_mutex);
2247 2234 break;
2248 2235 case PM_LEVEL_D3:
2249 2236 NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
2250 2237 MPTSAS_POWER_OFF(mpt);
2251 2238 break;
2252 2239 default:
2253 2240 mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
2254 2241 mpt->m_instance, level);
2255 2242 rval = DDI_FAILURE;
2256 2243 break;
2257 2244 }
2258 2245 mutex_exit(&mpt->m_mutex);
2259 2246 return (rval);
2260 2247 }
2261 2248
2262 2249 /*
2263 2250 * Initialize configuration space and figure out which
2264 2251 * chip and revision of the chip the mpt driver is using.
2265 2252 */
2266 2253 static int
2267 2254 mptsas_config_space_init(mptsas_t *mpt)
2268 2255 {
2269 2256 NDBG0(("mptsas_config_space_init"));
2270 2257
2271 2258 if (mpt->m_config_handle != NULL)
2272 2259 return (TRUE);
2273 2260
2274 2261 if (pci_config_setup(mpt->m_dip,
2275 2262 &mpt->m_config_handle) != DDI_SUCCESS) {
2276 2263 mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
2277 2264 return (FALSE);
2278 2265 }
2279 2266
2280 2267 /*
2281 2268 * This is a workaround for a XMITS ASIC bug which does not
2282 2269 * drive the CBE upper bits.
2283 2270 */
2284 2271 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
2285 2272 PCI_STAT_PERROR) {
2286 2273 pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
2287 2274 PCI_STAT_PERROR);
2288 2275 }
2289 2276
2290 2277 mptsas_setup_cmd_reg(mpt);
2291 2278
2292 2279 /*
2293 2280 * Get the chip device id:
2294 2281 */
2295 2282 mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);
2296 2283
2297 2284 /*
2298 2285 * Save the revision.
2299 2286 */
2300 2287 mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);
2301 2288
2302 2289 /*
2303 2290 * Save the SubSystem Vendor and Device IDs
2304 2291 */
2305 2292 mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
2306 2293 mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);
2307 2294
2308 2295 /*
2309 2296 * Set the latency timer to 0x40 as specified by the upa -> pci
2310 2297 * bridge chip design team. This may be done by the sparc pci
2311 2298 * bus nexus driver, but the driver should make sure the latency
2312 2299 * timer is correct for performance reasons.
2313 2300 */
2314 2301 pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
2315 2302 MPTSAS_LATENCY_TIMER);
2316 2303
2317 2304 (void) mptsas_get_pci_cap(mpt);
2318 2305 return (TRUE);
2319 2306 }
2320 2307
2321 2308 static void
2322 2309 mptsas_config_space_fini(mptsas_t *mpt)
2323 2310 {
2324 2311 if (mpt->m_config_handle != NULL) {
2325 2312 mptsas_disable_bus_master(mpt);
2326 2313 pci_config_teardown(&mpt->m_config_handle);
2327 2314 mpt->m_config_handle = NULL;
2328 2315 }
2329 2316 }
2330 2317
2331 2318 static void
2332 2319 mptsas_setup_cmd_reg(mptsas_t *mpt)
2333 2320 {
2334 2321 ushort_t cmdreg;
2335 2322
2336 2323 /*
2337 2324 * Set the command register to the needed values.
2338 2325 */
2339 2326 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2340 2327 cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2341 2328 PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2342 2329 cmdreg &= ~PCI_COMM_IO;
2343 2330 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2344 2331 }
2345 2332
2346 2333 static void
2347 2334 mptsas_disable_bus_master(mptsas_t *mpt)
2348 2335 {
2349 2336 ushort_t cmdreg;
2350 2337
2351 2338 /*
2352 2339 * Clear the master enable bit in the PCI command register.
2353 2340 * This prevents any bus mastering activity like DMA.
2354 2341 */
2355 2342 cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2356 2343 cmdreg &= ~PCI_COMM_ME;
2357 2344 pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2358 2345 }
2359 2346
2360 2347 int
2361 2348 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2362 2349 {
2363 2350 ddi_dma_attr_t attrs;
2364 2351
2365 2352 attrs = mpt->m_io_dma_attr;
2366 2353 attrs.dma_attr_sgllen = 1;
2367 2354
2368 2355 ASSERT(dma_statep != NULL);
2369 2356
2370 2357 if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2371 2358 &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2372 2359 &dma_statep->cookie) == FALSE) {
2373 2360 return (DDI_FAILURE);
2374 2361 }
2375 2362
2376 2363 return (DDI_SUCCESS);
2377 2364 }
2378 2365
2379 2366 void
2380 2367 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2381 2368 {
2382 2369 ASSERT(dma_statep != NULL);
2383 2370 mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2384 2371 dma_statep->size = 0;
2385 2372 }
2386 2373
2387 2374 int
2388 2375 mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
2389 2376 {
2390 2377 ddi_dma_attr_t attrs;
2391 2378 ddi_dma_handle_t dma_handle;
2392 2379 caddr_t memp;
2393 2380 ddi_acc_handle_t accessp;
2394 2381 int rval;
2395 2382
2396 2383 ASSERT(mutex_owned(&mpt->m_mutex));
2397 2384
2398 2385 attrs = mpt->m_msg_dma_attr;
2399 2386 attrs.dma_attr_sgllen = 1;
2400 2387 attrs.dma_attr_granular = size;
2401 2388
2402 2389 if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
2403 2390 &accessp, &memp, size, NULL) == FALSE) {
2404 2391 return (DDI_FAILURE);
2405 2392 }
2406 2393
2407 2394 rval = (*callback) (mpt, memp, var, accessp);
2408 2395
2409 2396 if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
2410 2397 (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
2411 2398 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
2412 2399 rval = DDI_FAILURE;
2413 2400 }
2414 2401
2415 2402 mptsas_dma_addr_destroy(&dma_handle, &accessp);
2416 2403 return (rval);
2417 2404
2418 2405 }
2419 2406
2420 2407 static int
2421 2408 mptsas_alloc_request_frames(mptsas_t *mpt)
2422 2409 {
2423 2410 ddi_dma_attr_t frame_dma_attrs;
2424 2411 caddr_t memp;
2425 2412 ddi_dma_cookie_t cookie;
2426 2413 size_t mem_size;
2427 2414
2428 2415 /*
2429 2416 * re-alloc when it has already alloced
2430 2417 */
2431 2418 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2432 2419 &mpt->m_acc_req_frame_hdl);
2433 2420
2434 2421 /*
2435 2422 * The size of the request frame pool is:
2436 2423 * Number of Request Frames * Request Frame Size
2437 2424 */
2438 2425 mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2439 2426
2440 2427 /*
2441 2428 * set the DMA attributes. System Request Message Frames must be
2442 2429 * aligned on a 16-byte boundry.
2443 2430 */
2444 2431 frame_dma_attrs = mpt->m_msg_dma_attr;
2445 2432 frame_dma_attrs.dma_attr_align = 16;
2446 2433 frame_dma_attrs.dma_attr_sgllen = 1;
2447 2434
2448 2435 /*
2449 2436 * allocate the request frame pool.
2450 2437 */
2451 2438 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2452 2439 &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2453 2440 mem_size, &cookie) == FALSE) {
2454 2441 return (DDI_FAILURE);
2455 2442 }
2456 2443
2457 2444 /*
2458 2445 * Store the request frame memory address. This chip uses this
2459 2446 * address to dma to and from the driver's frame. The second
2460 2447 * address is the address mpt uses to fill in the frame.
2461 2448 */
2462 2449 mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2463 2450 mpt->m_req_frame = memp;
2464 2451
2465 2452 /*
2466 2453 * Clear the request frame pool.
2467 2454 */
2468 2455 bzero(mpt->m_req_frame, mem_size);
2469 2456
2470 2457 return (DDI_SUCCESS);
2471 2458 }
2472 2459
2473 2460 static int
2474 2461 mptsas_alloc_reply_frames(mptsas_t *mpt)
2475 2462 {
2476 2463 ddi_dma_attr_t frame_dma_attrs;
2477 2464 caddr_t memp;
2478 2465 ddi_dma_cookie_t cookie;
2479 2466 size_t mem_size;
2480 2467
2481 2468 /*
2482 2469 * re-alloc when it has already alloced
2483 2470 */
2484 2471 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2485 2472 &mpt->m_acc_reply_frame_hdl);
2486 2473
2487 2474 /*
2488 2475 * The size of the reply frame pool is:
2489 2476 * Number of Reply Frames * Reply Frame Size
2490 2477 */
2491 2478 mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2492 2479
2493 2480 /*
2494 2481 * set the DMA attributes. System Reply Message Frames must be
2495 2482 * aligned on a 4-byte boundary. This is the default.
2496 2483 */
2497 2484 frame_dma_attrs = mpt->m_msg_dma_attr;
2498 2485 frame_dma_attrs.dma_attr_sgllen = 1;
2499 2486
2500 2487 /*
2501 2488 * allocate the reply frame pool
2502 2489 */
2503 2490 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2504 2491 &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2505 2492 mem_size, &cookie) == FALSE) {
2506 2493 return (DDI_FAILURE);
2507 2494 }
2508 2495
2509 2496 /*
2510 2497 * Store the reply frame memory address. This chip uses this
2511 2498 * address to dma to and from the driver's frame. The second
2512 2499 * address is the address mpt uses to process the frame.
2513 2500 */
2514 2501 mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2515 2502 mpt->m_reply_frame = memp;
2516 2503
2517 2504 /*
2518 2505 * Clear the reply frame pool.
2519 2506 */
2520 2507 bzero(mpt->m_reply_frame, mem_size);
2521 2508
2522 2509 return (DDI_SUCCESS);
2523 2510 }
2524 2511
2525 2512 static int
2526 2513 mptsas_alloc_free_queue(mptsas_t *mpt)
2527 2514 {
2528 2515 ddi_dma_attr_t frame_dma_attrs;
2529 2516 caddr_t memp;
2530 2517 ddi_dma_cookie_t cookie;
2531 2518 size_t mem_size;
2532 2519
2533 2520 /*
2534 2521 * re-alloc when it has already alloced
2535 2522 */
2536 2523 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2537 2524 &mpt->m_acc_free_queue_hdl);
2538 2525
2539 2526 /*
2540 2527 * The reply free queue size is:
2541 2528 * Reply Free Queue Depth * 4
2542 2529 * The "4" is the size of one 32 bit address (low part of 64-bit
2543 2530 * address)
2544 2531 */
2545 2532 mem_size = mpt->m_free_queue_depth * 4;
2546 2533
2547 2534 /*
2548 2535 * set the DMA attributes The Reply Free Queue must be aligned on a
2549 2536 * 16-byte boundary.
2550 2537 */
2551 2538 frame_dma_attrs = mpt->m_msg_dma_attr;
2552 2539 frame_dma_attrs.dma_attr_align = 16;
2553 2540 frame_dma_attrs.dma_attr_sgllen = 1;
2554 2541
2555 2542 /*
2556 2543 * allocate the reply free queue
2557 2544 */
2558 2545 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2559 2546 &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2560 2547 mem_size, &cookie) == FALSE) {
2561 2548 return (DDI_FAILURE);
2562 2549 }
2563 2550
2564 2551 /*
2565 2552 * Store the reply free queue memory address. This chip uses this
2566 2553 * address to read from the reply free queue. The second address
2567 2554 * is the address mpt uses to manage the queue.
2568 2555 */
2569 2556 mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2570 2557 mpt->m_free_queue = memp;
2571 2558
2572 2559 /*
2573 2560 * Clear the reply free queue memory.
2574 2561 */
2575 2562 bzero(mpt->m_free_queue, mem_size);
2576 2563
2577 2564 return (DDI_SUCCESS);
2578 2565 }
2579 2566
2580 2567 static int
2581 2568 mptsas_alloc_post_queue(mptsas_t *mpt)
2582 2569 {
2583 2570 ddi_dma_attr_t frame_dma_attrs;
2584 2571 caddr_t memp;
2585 2572 ddi_dma_cookie_t cookie;
2586 2573 size_t mem_size;
2587 2574
2588 2575 /*
2589 2576 * re-alloc when it has already alloced
2590 2577 */
2591 2578 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2592 2579 &mpt->m_acc_post_queue_hdl);
2593 2580
2594 2581 /*
2595 2582 * The reply descriptor post queue size is:
2596 2583 * Reply Descriptor Post Queue Depth * 8
2597 2584 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2598 2585 */
2599 2586 mem_size = mpt->m_post_queue_depth * 8;
2600 2587
2601 2588 /*
2602 2589 * set the DMA attributes. The Reply Descriptor Post Queue must be
2603 2590 * aligned on a 16-byte boundary.
2604 2591 */
2605 2592 frame_dma_attrs = mpt->m_msg_dma_attr;
2606 2593 frame_dma_attrs.dma_attr_align = 16;
2607 2594 frame_dma_attrs.dma_attr_sgllen = 1;
2608 2595
2609 2596 /*
2610 2597 * allocate the reply post queue
2611 2598 */
2612 2599 if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2613 2600 &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
2614 2601 mem_size, &cookie) == FALSE) {
2615 2602 return (DDI_FAILURE);
2616 2603 }
2617 2604
2618 2605 /*
2619 2606 * Store the reply descriptor post queue memory address. This chip
2620 2607 * uses this address to write to the reply descriptor post queue. The
2621 2608 * second address is the address mpt uses to manage the queue.
2622 2609 */
2623 2610 mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
2624 2611 mpt->m_post_queue = memp;
2625 2612
2626 2613 /*
↓ open down ↓ |
370 lines elided |
↑ open up ↑ |
2627 2614 * Clear the reply post queue memory.
2628 2615 */
2629 2616 bzero(mpt->m_post_queue, mem_size);
2630 2617
2631 2618 return (DDI_SUCCESS);
2632 2619 }
2633 2620
2634 2621 static void
2635 2622 mptsas_alloc_reply_args(mptsas_t *mpt)
2636 2623 {
2637 - if (mpt->m_replyh_args != NULL) {
2638 - kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
2639 - * mpt->m_max_replies);
2640 - mpt->m_replyh_args = NULL;
2624 + if (mpt->m_replyh_args == NULL) {
2625 + mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2626 + mpt->m_max_replies, KM_SLEEP);
2641 2627 }
2642 - mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
2643 - mpt->m_max_replies, KM_SLEEP);
2644 2628 }
2645 2629
2646 2630 static int
2647 2631 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2648 2632 {
2649 2633 mptsas_cache_frames_t *frames = NULL;
2650 2634 if (cmd->cmd_extra_frames == NULL) {
2651 2635 frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
2652 2636 if (frames == NULL) {
2653 2637 return (DDI_FAILURE);
2654 2638 }
2655 2639 cmd->cmd_extra_frames = frames;
2656 2640 }
2657 2641 return (DDI_SUCCESS);
2658 2642 }
2659 2643
2660 2644 static void
2661 2645 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
2662 2646 {
2663 2647 if (cmd->cmd_extra_frames) {
2664 2648 kmem_cache_free(mpt->m_cache_frames,
2665 2649 (void *)cmd->cmd_extra_frames);
2666 2650 cmd->cmd_extra_frames = NULL;
2667 2651 }
2668 2652 }
2669 2653
2670 2654 static void
2671 2655 mptsas_cfg_fini(mptsas_t *mpt)
2672 2656 {
2673 2657 NDBG0(("mptsas_cfg_fini"));
2674 2658 ddi_regs_map_free(&mpt->m_datap);
2675 2659 }
2676 2660
2677 2661 static void
2678 2662 mptsas_hba_fini(mptsas_t *mpt)
2679 2663 {
2680 2664 NDBG0(("mptsas_hba_fini"));
2681 2665
2682 2666 /*
2683 2667 * Free up any allocated memory
2684 2668 */
2685 2669 mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2686 2670 &mpt->m_acc_req_frame_hdl);
2687 2671
2688 2672 mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2689 2673 &mpt->m_acc_reply_frame_hdl);
2690 2674
2691 2675 mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2692 2676 &mpt->m_acc_free_queue_hdl);
2693 2677
2694 2678 mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2695 2679 &mpt->m_acc_post_queue_hdl);
2696 2680
2697 2681 if (mpt->m_replyh_args != NULL) {
2698 2682 kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
2699 2683 * mpt->m_max_replies);
2700 2684 }
2701 2685 }
2702 2686
2703 2687 static int
2704 2688 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
2705 2689 {
2706 2690 int lun = 0;
2707 2691 char *sas_wwn = NULL;
2708 2692 int phynum = -1;
2709 2693 int reallen = 0;
2710 2694
2711 2695 /* Get the target num */
2712 2696 lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
2713 2697 LUN_PROP, 0);
2714 2698
2715 2699 if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
2716 2700 DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
2717 2701 /*
2718 2702 * Stick in the address of form "pPHY,LUN"
2719 2703 */
2720 2704 reallen = snprintf(name, len, "p%x,%x", phynum, lun);
2721 2705 } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
2722 2706 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
2723 2707 == DDI_PROP_SUCCESS) {
2724 2708 /*
2725 2709 * Stick in the address of the form "wWWN,LUN"
2726 2710 */
2727 2711 reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
2728 2712 ddi_prop_free(sas_wwn);
2729 2713 } else {
2730 2714 return (DDI_FAILURE);
2731 2715 }
2732 2716
2733 2717 ASSERT(reallen < len);
2734 2718 if (reallen >= len) {
2735 2719 mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
2736 2720 "length too small, it needs to be %d bytes", reallen + 1);
2737 2721 }
2738 2722 return (DDI_SUCCESS);
2739 2723 }
2740 2724
2741 2725 /*
2742 2726 * tran_tgt_init(9E) - target device instance initialization
2743 2727 */
2744 2728 static int
2745 2729 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2746 2730 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2747 2731 {
2748 2732 #ifndef __lock_lint
2749 2733 _NOTE(ARGUNUSED(hba_tran))
2750 2734 #endif
2751 2735
2752 2736 /*
2753 2737 * At this point, the scsi_device structure already exists
2754 2738 * and has been initialized.
2755 2739 *
2756 2740 * Use this function to allocate target-private data structures,
2757 2741 * if needed by this HBA. Add revised flow-control and queue
2758 2742 * properties for child here, if desired and if you can tell they
2759 2743 * support tagged queueing by now.
2760 2744 */
2761 2745 mptsas_t *mpt;
2762 2746 int lun = sd->sd_address.a_lun;
2763 2747 mdi_pathinfo_t *pip = NULL;
2764 2748 mptsas_tgt_private_t *tgt_private = NULL;
2765 2749 mptsas_target_t *ptgt = NULL;
2766 2750 char *psas_wwn = NULL;
2767 2751 int phymask = 0;
2768 2752 uint64_t sas_wwn = 0;
2769 2753 mpt = SDEV2MPT(sd);
2770 2754
2771 2755 ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
2772 2756
2773 2757 NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
2774 2758 (void *)hba_dip, (void *)tgt_dip, lun));
2775 2759
2776 2760 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
2777 2761 (void) ndi_merge_node(tgt_dip, mptsas_name_child);
2778 2762 ddi_set_name_addr(tgt_dip, NULL);
2779 2763 return (DDI_FAILURE);
2780 2764 }
2781 2765 /*
2782 2766 * phymask is 0 means the virtual port for RAID
2783 2767 */
2784 2768 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
2785 2769 "phymask", 0);
2786 2770 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
2787 2771 if ((pip = (void *)(sd->sd_private)) == NULL) {
2788 2772 /*
2789 2773 * Very bad news if this occurs. Somehow scsi_vhci has
2790 2774 * lost the pathinfo node for this target.
2791 2775 */
2792 2776 return (DDI_NOT_WELL_FORMED);
2793 2777 }
2794 2778
2795 2779 if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
2796 2780 DDI_PROP_SUCCESS) {
2797 2781 mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
2798 2782 return (DDI_FAILURE);
2799 2783 }
2800 2784
2801 2785 if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
2802 2786 &psas_wwn) == MDI_SUCCESS) {
2803 2787 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
2804 2788 sas_wwn = 0;
2805 2789 }
2806 2790 (void) mdi_prop_free(psas_wwn);
2807 2791 }
2808 2792 } else {
2809 2793 lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
2810 2794 DDI_PROP_DONTPASS, LUN_PROP, 0);
2811 2795 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
2812 2796 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
2813 2797 DDI_PROP_SUCCESS) {
2814 2798 if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
2815 2799 sas_wwn = 0;
2816 2800 }
2817 2801 ddi_prop_free(psas_wwn);
2818 2802 } else {
2819 2803 sas_wwn = 0;
2820 2804 }
2821 2805 }
2822 2806 ASSERT((sas_wwn != 0) || (phymask != 0));
2823 2807 mutex_enter(&mpt->m_mutex);
2824 2808 ptgt = mptsas_hash_search(&mpt->m_active->m_tgttbl, sas_wwn, phymask);
2825 2809 mutex_exit(&mpt->m_mutex);
2826 2810 if (ptgt == NULL) {
2827 2811 mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
2828 2812 "gone already! phymask:%x, saswwn %"PRIx64, phymask,
2829 2813 sas_wwn);
2830 2814 return (DDI_FAILURE);
2831 2815 }
2832 2816 if (hba_tran->tran_tgt_private == NULL) {
2833 2817 tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
2834 2818 KM_SLEEP);
2835 2819 tgt_private->t_lun = lun;
2836 2820 tgt_private->t_private = ptgt;
2837 2821 hba_tran->tran_tgt_private = tgt_private;
2838 2822 }
2839 2823
2840 2824 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
2841 2825 return (DDI_SUCCESS);
2842 2826 }
2843 2827 mutex_enter(&mpt->m_mutex);
2844 2828
2845 2829 if (ptgt->m_deviceinfo &
2846 2830 (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
2847 2831 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
2848 2832 uchar_t *inq89 = NULL;
2849 2833 int inq89_len = 0x238;
2850 2834 int reallen = 0;
2851 2835 int rval = 0;
2852 2836 struct sata_id *sid = NULL;
2853 2837 char model[SATA_ID_MODEL_LEN + 1];
2854 2838 char fw[SATA_ID_FW_LEN + 1];
2855 2839 char *vid, *pid;
2856 2840 int i;
2857 2841
2858 2842 mutex_exit(&mpt->m_mutex);
2859 2843 /*
2860 2844 * According to SCSI/ATA Translation-2 (SAT-2) revision 01a
2861 2845 * chapter 12.4.2 VPD page 89h includes 512 bytes ATA IDENTIFY
2862 2846 * DEVICE data or ATA IDENTIFY PACKET DEVICE data.
2863 2847 */
2864 2848 inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
2865 2849 rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
2866 2850 inq89, inq89_len, &reallen, 1);
2867 2851
2868 2852 if (rval != 0) {
2869 2853 if (inq89 != NULL) {
2870 2854 kmem_free(inq89, inq89_len);
2871 2855 }
2872 2856
2873 2857 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
2874 2858 "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
2875 2859 return (DDI_SUCCESS);
2876 2860 }
2877 2861 sid = (void *)(&inq89[60]);
2878 2862
2879 2863 swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
2880 2864 swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
2881 2865
2882 2866 model[SATA_ID_MODEL_LEN] = 0;
2883 2867 fw[SATA_ID_FW_LEN] = 0;
2884 2868
2885 2869 /*
2886 2870 * split model into vid/pid
2887 2871 */
2888 2872 for (i = 0, pid = model; i < SATA_ID_MODEL_LEN; i++, pid++)
2889 2873 if ((*pid == ' ') || (*pid == '\t'))
2890 2874 break;
2891 2875 if (i < SATA_ID_MODEL_LEN) {
2892 2876 vid = model;
2893 2877 /*
2894 2878 * terminate vid, establish pid
2895 2879 */
2896 2880 *pid++ = 0;
2897 2881 } else {
2898 2882 /*
2899 2883 * vid will stay "ATA ", the rule is same
2900 2884 * as sata framework implementation.
2901 2885 */
2902 2886 vid = NULL;
2903 2887 /*
2904 2888 * model is all pid
2905 2889 */
2906 2890 pid = model;
2907 2891 }
2908 2892
2909 2893 /*
2910 2894 * override SCSA "inquiry-*" properties
2911 2895 */
2912 2896 if (vid)
2913 2897 (void) scsi_device_prop_update_inqstring(sd,
2914 2898 INQUIRY_VENDOR_ID, vid, strlen(vid));
2915 2899 if (pid)
2916 2900 (void) scsi_device_prop_update_inqstring(sd,
2917 2901 INQUIRY_PRODUCT_ID, pid, strlen(pid));
2918 2902 (void) scsi_device_prop_update_inqstring(sd,
2919 2903 INQUIRY_REVISION_ID, fw, strlen(fw));
2920 2904
2921 2905 if (inq89 != NULL) {
2922 2906 kmem_free(inq89, inq89_len);
2923 2907 }
2924 2908 } else {
2925 2909 mutex_exit(&mpt->m_mutex);
2926 2910 }
2927 2911
2928 2912 return (DDI_SUCCESS);
2929 2913 }
2930 2914 /*
2931 2915 * tran_tgt_free(9E) - target device instance deallocation
2932 2916 */
2933 2917 static void
2934 2918 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
2935 2919 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
2936 2920 {
2937 2921 #ifndef __lock_lint
2938 2922 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
2939 2923 #endif
2940 2924
2941 2925 mptsas_tgt_private_t *tgt_private = hba_tran->tran_tgt_private;
2942 2926
2943 2927 if (tgt_private != NULL) {
2944 2928 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
2945 2929 hba_tran->tran_tgt_private = NULL;
2946 2930 }
2947 2931 }
2948 2932
2949 2933 /*
2950 2934 * scsi_pkt handling
2951 2935 *
2952 2936 * Visible to the external world via the transport structure.
2953 2937 */
2954 2938
2955 2939 /*
2956 2940 * Notes:
2957 2941 * - transport the command to the addressed SCSI target/lun device
2958 2942 * - normal operation is to schedule the command to be transported,
2959 2943 * and return TRAN_ACCEPT if this is successful.
2960 2944 * - if NO_INTR, tran_start must poll device for command completion
2961 2945 */
2962 2946 static int
2963 2947 mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
2964 2948 {
2965 2949 #ifndef __lock_lint
2966 2950 _NOTE(ARGUNUSED(ap))
2967 2951 #endif
2968 2952 mptsas_t *mpt = PKT2MPT(pkt);
2969 2953 mptsas_cmd_t *cmd = PKT2CMD(pkt);
2970 2954 int rval;
2971 2955 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
2972 2956
2973 2957 NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
2974 2958 ASSERT(ptgt);
2975 2959 if (ptgt == NULL)
2976 2960 return (TRAN_FATAL_ERROR);
2977 2961
2978 2962 /*
2979 2963 * prepare the pkt before taking mutex.
2980 2964 */
2981 2965 rval = mptsas_prepare_pkt(cmd);
2982 2966 if (rval != TRAN_ACCEPT) {
2983 2967 return (rval);
2984 2968 }
2985 2969
2986 2970 /*
2987 2971 * Send the command to target/lun, however your HBA requires it.
2988 2972 * If busy, return TRAN_BUSY; if there's some other formatting error
2989 2973 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
2990 2974 * return of TRAN_ACCEPT.
2991 2975 *
↓ open down ↓ |
338 lines elided |
↑ open up ↑ |
2992 2976 * Remember that access to shared resources, including the mptsas_t
2993 2977 * data structure and the HBA hardware registers, must be protected
2994 2978 * with mutexes, here and everywhere.
2995 2979 *
2996 2980 * Also remember that at interrupt time, you'll get an argument
2997 2981 * to the interrupt handler which is a pointer to your mptsas_t
2998 2982 * structure; you'll have to remember which commands are outstanding
2999 2983 * and which scsi_pkt is the currently-running command so the
3000 2984 * interrupt handler can refer to the pkt to set completion
3001 2985 * status, call the target driver back through pkt_comp, etc.
2986 + *
2987 + * If the instance lock is held by another thread, don't spin to wait
2988 + * for it. Instead, queue the cmd, and the next time the instance lock
2989 + * is not held, accept all the queued cmds. An extra tx_waitq is
2990 + * introduced to protect the queue.
2991 + *
2992 + * A polled cmd will not be queued and is accepted as usual.
2993 + *
2994 + * Under the tx_waitq mutex, record whether a thread is draining
2995 + * the tx_waitq. An IO requesting thread that finds the instance
2996 + * mutex contended appends to the tx_waitq and while holding the
2997 + * tx_waitq mutex, if the draining flag is not set, sets it and then
2998 + * proceeds to spin for the instance mutex. This scheme ensures that
2999 + * the last cmd in a burst is processed.
3000 + *
3001 + * we enable this feature only when the helper threads are enabled,
3002 + * at which point we consider the load to be heavy.
3003 + *
3004 + * per instance mutex m_tx_waitq_mutex is introduced to protect the
3005 + * m_tx_waitqtail, m_tx_waitq, m_tx_draining.
3002 3006 */
3003 3007
3004 - mutex_enter(&ptgt->m_tgt_intr_mutex);
3008 + if (mpt->m_doneq_thread_n) {
3009 + if (mutex_tryenter(&mpt->m_mutex) != 0) {
3010 + rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3011 + mutex_exit(&mpt->m_mutex);
3012 + } else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3013 + mutex_enter(&mpt->m_mutex);
3014 + rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3015 + mutex_exit(&mpt->m_mutex);
3016 + } else {
3017 + mutex_enter(&mpt->m_tx_waitq_mutex);
3018 + /*
3019 + * ptgt->m_dr_flag is protected by m_mutex or
3020 + * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
3021 + * is acquired.
3022 + */
3023 + if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3024 + if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3025 + /*
3026 + * The command should be allowed to
3027 + * retry by returning TRAN_BUSY to
3028 + * stall the I/O's which come from
3029 + * scsi_vhci since the device/path is
3030 + * in unstable state now.
3031 + */
3032 + mutex_exit(&mpt->m_tx_waitq_mutex);
3033 + return (TRAN_BUSY);
3034 + } else {
3035 + /*
3036 + * The device is offline, just fail the
3037 + * command by returning
3038 + * TRAN_FATAL_ERROR.
3039 + */
3040 + mutex_exit(&mpt->m_tx_waitq_mutex);
3041 + return (TRAN_FATAL_ERROR);
3042 + }
3043 + }
3044 + if (mpt->m_tx_draining) {
3045 + cmd->cmd_flags |= CFLAG_TXQ;
3046 + *mpt->m_tx_waitqtail = cmd;
3047 + mpt->m_tx_waitqtail = &cmd->cmd_linkp;
3048 + mutex_exit(&mpt->m_tx_waitq_mutex);
3049 + } else { /* drain the queue */
3050 + mpt->m_tx_draining = 1;
3051 + mutex_exit(&mpt->m_tx_waitq_mutex);
3052 + mutex_enter(&mpt->m_mutex);
3053 + rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3054 + mutex_exit(&mpt->m_mutex);
3055 + }
3056 + }
3057 + } else {
3058 + mutex_enter(&mpt->m_mutex);
3059 + /*
3060 + * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3061 + * in this case, m_mutex is acquired.
3062 + */
3063 + if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3064 + if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3065 + /*
3066 + * commands should be allowed to retry by
3067 + * returning TRAN_BUSY to stall the I/O's
3068 + * which come from scsi_vhci since the device/
3069 + * path is in unstable state now.
3070 + */
3071 + mutex_exit(&mpt->m_mutex);
3072 + return (TRAN_BUSY);
3073 + } else {
3074 + /*
3075 + * The device is offline, just fail the
3076 + * command by returning TRAN_FATAL_ERROR.
3077 + */
3078 + mutex_exit(&mpt->m_mutex);
3079 + return (TRAN_FATAL_ERROR);
3080 + }
3081 + }
3082 + rval = mptsas_accept_pkt(mpt, cmd);
3083 + mutex_exit(&mpt->m_mutex);
3084 + }
3085 +
3086 + return (rval);
3087 +}
3088 +
3089 +/*
3090 + * Accept all the queued cmds(if any) before accept the current one.
3091 + */
3092 +static int
3093 +mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3094 +{
3095 + int rval;
3096 + mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3097 +
3098 + ASSERT(mutex_owned(&mpt->m_mutex));
3099 + /*
3100 + * The call to mptsas_accept_tx_waitq() must always be performed
3101 + * because that is where mpt->m_tx_draining is cleared.
3102 + */
3103 + mutex_enter(&mpt->m_tx_waitq_mutex);
3104 + mptsas_accept_tx_waitq(mpt);
3105 + mutex_exit(&mpt->m_tx_waitq_mutex);
3106 + /*
3107 + * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3108 + * in this case, m_mutex is acquired.
3109 + */
3005 3110 if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3006 3111 if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3007 3112 /*
3008 - * commands should be allowed to retry by
3009 - * returning TRAN_BUSY to stall the I/O's
3010 - * which come from scsi_vhci since the device/
3011 - * path is in unstable state now.
3113 + * The command should be allowed to retry by returning
3114 + * TRAN_BUSY to stall the I/O's which come from
3115 + * scsi_vhci since the device/path is in unstable state
3116 + * now.
3012 3117 */
3013 - mutex_exit(&ptgt->m_tgt_intr_mutex);
3014 3118 return (TRAN_BUSY);
3015 3119 } else {
3016 3120 /*
3017 - * The device is offline, just fail the
3018 - * command by returning TRAN_FATAL_ERROR.
3121 + * The device is offline, just fail the command by
3122 + * returning TRAN_FATAL_ERROR.
3019 3123 */
3020 - mutex_exit(&ptgt->m_tgt_intr_mutex);
3021 3124 return (TRAN_FATAL_ERROR);
3022 3125 }
3023 3126 }
3024 - mutex_exit(&ptgt->m_tgt_intr_mutex);
3025 3127 rval = mptsas_accept_pkt(mpt, cmd);
3026 3128
3027 3129 return (rval);
3028 3130 }
3029 3131
3030 3132 static int
3031 3133 mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3032 3134 {
3033 3135 int rval = TRAN_ACCEPT;
3034 3136 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3035 3137
3036 3138 NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));
3037 3139
3140 + ASSERT(mutex_owned(&mpt->m_mutex));
3141 +
3038 3142 if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
3039 3143 rval = mptsas_prepare_pkt(cmd);
3040 3144 if (rval != TRAN_ACCEPT) {
3041 3145 cmd->cmd_flags &= ~CFLAG_TRANFLAG;
3042 3146 return (rval);
3043 3147 }
3044 3148 }
3045 3149
3046 3150 /*
3047 3151 * reset the throttle if we were draining
3048 3152 */
3049 - mutex_enter(&ptgt->m_tgt_intr_mutex);
3050 3153 if ((ptgt->m_t_ncmds == 0) &&
3051 3154 (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
3052 3155 NDBG23(("reset throttle"));
3053 3156 ASSERT(ptgt->m_reset_delay == 0);
3054 3157 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
3055 3158 }
3056 3159
3057 3160 /*
3161 + * If HBA is being reset, the DevHandles are being re-initialized,
3162 + * which means that they could be invalid even if the target is still
3163 + * attached. Check if being reset and if DevHandle is being
3164 + * re-initialized. If this is the case, return BUSY so the I/O can be
3165 + * retried later.
3166 + */
3167 + if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
3168 + mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
3169 + if (cmd->cmd_flags & CFLAG_TXQ) {
3170 + mptsas_doneq_add(mpt, cmd);
3171 + mptsas_doneq_empty(mpt);
3172 + return (rval);
3173 + } else {
3174 + return (TRAN_BUSY);
3175 + }
3176 + }
3177 +
3178 + /*
3058 3179 * If device handle has already been invalidated, just
3059 3180 * fail the command. In theory, command from scsi_vhci
3060 3181 * client cannot send down a command with an invalid
3061 3182 * devhdl since devhdl is set after path offline; the target
3062 3183 * driver is not supposed to select an offlined path.
3063 3184 */
3064 3185 if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
3065 3186 NDBG20(("rejecting command, it might because invalid devhdl "
3066 3187 "request."));
3067 - mutex_exit(&ptgt->m_tgt_intr_mutex);
3068 - mutex_enter(&mpt->m_mutex);
3069 - /*
3070 - * If HBA is being reset, the DevHandles are being
3071 - * re-initialized, which means that they could be invalid
3072 - * even if the target is still attached. Check if being reset
3073 - * and if DevHandle is being re-initialized. If this is the
3074 - * case, return BUSY so the I/O can be retried later.
3075 - */
3076 - if (mpt->m_in_reset) {
3077 - mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
3078 - STAT_BUS_RESET);
3079 - if (cmd->cmd_flags & CFLAG_TXQ) {
3080 - mptsas_doneq_add(mpt, cmd);
3081 - mptsas_doneq_empty(mpt);
3082 - mutex_exit(&mpt->m_mutex);
3083 - return (rval);
3084 - } else {
3085 - mutex_exit(&mpt->m_mutex);
3086 - return (TRAN_BUSY);
3087 - }
3088 - }
3089 3188 mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
3090 3189 if (cmd->cmd_flags & CFLAG_TXQ) {
3091 3190 mptsas_doneq_add(mpt, cmd);
3092 3191 mptsas_doneq_empty(mpt);
3093 - mutex_exit(&mpt->m_mutex);
3094 3192 return (rval);
3095 3193 } else {
3096 - mutex_exit(&mpt->m_mutex);
3097 3194 return (TRAN_FATAL_ERROR);
3098 3195 }
3099 3196 }
3100 - mutex_exit(&ptgt->m_tgt_intr_mutex);
3101 3197 /*
3102 3198 * The first case is the normal case. mpt gets a command from the
3103 3199 * target driver and starts it.
3104 3200 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
3105 3201 * commands is m_max_requests - 2.
3106 3202 */
3107 - mutex_enter(&ptgt->m_tgt_intr_mutex);
3108 - if ((ptgt->m_t_throttle > HOLD_THROTTLE) &&
3203 + if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
3204 + (ptgt->m_t_throttle > HOLD_THROTTLE) &&
3109 3205 (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
3110 3206 (ptgt->m_reset_delay == 0) &&
3111 3207 (ptgt->m_t_nwait == 0) &&
3112 3208 ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
3113 - mutex_exit(&ptgt->m_tgt_intr_mutex);
3114 3209 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
3115 - (void) mptsas_start_cmd0(mpt, cmd);
3210 + (void) mptsas_start_cmd(mpt, cmd);
3116 3211 } else {
3117 - mutex_enter(&mpt->m_mutex);
3118 3212 mptsas_waitq_add(mpt, cmd);
3119 - mutex_exit(&mpt->m_mutex);
3120 3213 }
3121 3214 } else {
3122 3215 /*
3123 3216 * Add this pkt to the work queue
3124 3217 */
3125 - mutex_exit(&ptgt->m_tgt_intr_mutex);
3126 - mutex_enter(&mpt->m_mutex);
3127 3218 mptsas_waitq_add(mpt, cmd);
3128 3219
3129 3220 if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3130 3221 (void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);
3131 3222
3132 3223 /*
3133 3224 * Only flush the doneq if this is not a TM
3134 3225 * cmd. For TM cmds the flushing of the
3135 3226 * doneq will be done in those routines.
3136 3227 */
3137 3228 if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
3138 3229 mptsas_doneq_empty(mpt);
3139 3230 }
3140 3231 }
3141 - mutex_exit(&mpt->m_mutex);
3142 3232 }
3143 3233 return (rval);
3144 3234 }
3145 3235
3146 3236 int
3147 3237 mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
3148 3238 {
3149 3239 mptsas_slots_t *slots;
3150 3240 int slot;
3151 3241 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3152 - mptsas_slot_free_e_t *pe;
3153 - int qn, qn_first;
3154 3242
3243 + ASSERT(mutex_owned(&mpt->m_mutex));
3155 3244 slots = mpt->m_active;
3156 3245
3157 3246 /*
3158 3247 * Account for reserved TM request slot and reserved SMID of 0.
3159 3248 */
3160 3249 ASSERT(slots->m_n_slots == (mpt->m_max_requests - 2));
3161 3250
3162 - qn = qn_first = CPU->cpu_seqid & (mpt->m_slot_freeq_pair_n - 1);
3251 + /*
3252 + * m_tags is equivalent to the SMID when sending requests. Since the
3253 + * SMID cannot be 0, start out at one if rolling over past the size
3254 + * of the request queue depth. Also, don't use the last SMID, which is
3255 + * reserved for TM requests.
3256 + */
3257 + slot = (slots->m_tags)++;
3258 + if (slots->m_tags > slots->m_n_slots) {
3259 + slots->m_tags = 1;
3260 + }
3163 3261
3164 -qpair_retry:
3165 - ASSERT(qn < mpt->m_slot_freeq_pair_n);
3166 - mutex_enter(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_mutex);
3167 - pe = list_head(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.
3168 - s.m_fq_list);
3169 - if (!pe) { /* switch the allocq and releq */
3170 - mutex_enter(&mpt->m_slot_freeq_pairp[qn].m_slot_releq.
3171 - s.m_fq_mutex);
3172 - if (mpt->m_slot_freeq_pairp[qn].m_slot_releq.s.m_fq_n) {
3173 - mpt->m_slot_freeq_pairp[qn].
3174 - m_slot_allocq.s.m_fq_n =
3175 - mpt->m_slot_freeq_pairp[qn].
3176 - m_slot_releq.s.m_fq_n;
3177 - mpt->m_slot_freeq_pairp[qn].
3178 - m_slot_allocq.s.m_fq_list.list_head.list_next =
3179 - mpt->m_slot_freeq_pairp[qn].
3180 - m_slot_releq.s.m_fq_list.list_head.list_next;
3181 - mpt->m_slot_freeq_pairp[qn].
3182 - m_slot_allocq.s.m_fq_list.list_head.list_prev =
3183 - mpt->m_slot_freeq_pairp[qn].
3184 - m_slot_releq.s.m_fq_list.list_head.list_prev;
3185 - mpt->m_slot_freeq_pairp[qn].
3186 - m_slot_releq.s.m_fq_list.list_head.list_prev->
3187 - list_next =
3188 - &mpt->m_slot_freeq_pairp[qn].
3189 - m_slot_allocq.s.m_fq_list.list_head;
3190 - mpt->m_slot_freeq_pairp[qn].
3191 - m_slot_releq.s.m_fq_list.list_head.list_next->
3192 - list_prev =
3193 - &mpt->m_slot_freeq_pairp[qn].
3194 - m_slot_allocq.s.m_fq_list.list_head;
3262 +alloc_tag:
3263 + /* Validate tag, should never fail. */
3264 + if (slots->m_slot[slot] == NULL) {
3265 + /*
3266 + * Make sure SMID is not using reserved value of 0
3267 + * and the TM request slot.
3268 + */
3269 + ASSERT((slot > 0) && (slot <= slots->m_n_slots));
3270 + cmd->cmd_slot = slot;
3271 + slots->m_slot[slot] = cmd;
3272 + mpt->m_ncmds++;
3195 3273
3196 - mpt->m_slot_freeq_pairp[qn].
3197 - m_slot_releq.s.m_fq_list.list_head.list_next =
3198 - mpt->m_slot_freeq_pairp[qn].
3199 - m_slot_releq.s.m_fq_list.list_head.list_prev =
3200 - &mpt->m_slot_freeq_pairp[qn].
3201 - m_slot_releq.s.m_fq_list.list_head;
3202 - mpt->m_slot_freeq_pairp[qn].
3203 - m_slot_releq.s.m_fq_n = 0;
3204 - } else {
3205 - mutex_exit(&mpt->m_slot_freeq_pairp[qn].
3206 - m_slot_releq.s.m_fq_mutex);
3207 - mutex_exit(&mpt->m_slot_freeq_pairp[qn].
3208 - m_slot_allocq.s.m_fq_mutex);
3209 - qn = (qn + 1) & (mpt->m_slot_freeq_pair_n - 1);
3210 - if (qn == qn_first)
3211 - return (FALSE);
3212 - else
3213 - goto qpair_retry;
3274 + /*
3275 + * only increment per target ncmds if this is not a
3276 + * command that has no target associated with it (i.e. an
3277 + * event acknowledgment)
3278 + */
3279 + if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
3280 + ptgt->m_t_ncmds++;
3214 3281 }
3215 - mutex_exit(&mpt->m_slot_freeq_pairp[qn].
3216 - m_slot_releq.s.m_fq_mutex);
3217 - pe = list_head(&mpt->m_slot_freeq_pairp[qn].
3218 - m_slot_allocq.s.m_fq_list);
3219 - ASSERT(pe);
3220 - }
3221 - list_remove(&mpt->m_slot_freeq_pairp[qn].
3222 - m_slot_allocq.s.m_fq_list, pe);
3223 - slot = pe->slot;
3224 - /*
3225 - * Make sure SMID is not using reserved value of 0
3226 - * and the TM request slot.
3227 - */
3228 - ASSERT((slot > 0) && (slot <= slots->m_n_slots) &&
3229 - mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n > 0);
3230 - cmd->cmd_slot = slot;
3231 - mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n--;
3232 - ASSERT(mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_n >= 0);
3282 + cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;
3233 3283
3234 - mutex_exit(&mpt->m_slot_freeq_pairp[qn].m_slot_allocq.s.m_fq_mutex);
3235 - /*
3236 - * only increment per target ncmds if this is not a
3237 - * command that has no target associated with it (i.e. a
3238 - * event acknoledgment)
3239 - */
3240 - if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
3241 - mutex_enter(&ptgt->m_tgt_intr_mutex);
3242 - ptgt->m_t_ncmds++;
3243 - mutex_exit(&ptgt->m_tgt_intr_mutex);
3244 - }
3245 - cmd->cmd_active_timeout = cmd->cmd_pkt->pkt_time;
3284 + /*
3285 + * If the initial timeout is less than or equal to one tick, bump
3286 + * the timeout by a tick so that command doesn't timeout before
3287 + * its allotted time.
3288 + */
3289 + if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
3290 + cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
3291 + }
3292 + return (TRUE);
3293 + } else {
3294 + int i;
3246 3295
3247 - /*
3248 - * If initial timout is less than or equal to one tick, bump
3249 - * the timeout by a tick so that command doesn't timeout before
3250 - * its allotted time.
3251 - */
3252 - if (cmd->cmd_active_timeout <= mptsas_scsi_watchdog_tick) {
3253 - cmd->cmd_active_timeout += mptsas_scsi_watchdog_tick;
3296 + /*
3297 + * If the slot is in use, scan until a free one is found. Don't use 0
3298 + * or final slot, which is reserved for TM requests.
3299 + */
3300 + for (i = 0; i < slots->m_n_slots; i++) {
3301 + slot = slots->m_tags;
3302 + if (++(slots->m_tags) > slots->m_n_slots) {
3303 + slots->m_tags = 1;
3304 + }
3305 + if (slots->m_slot[slot] == NULL) {
3306 + NDBG22(("found free slot %d", slot));
3307 + goto alloc_tag;
3308 + }
3309 + }
3254 3310 }
3255 - return (TRUE);
3311 + return (FALSE);
3256 3312 }
3257 3313
3258 3314 /*
3259 3315 * prepare the pkt:
3260 3316 * the pkt may have been resubmitted or just reused so
3261 3317 * initialize some fields and do some checks.
3262 3318 */
3263 3319 static int
3264 3320 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3265 3321 {
3266 3322 struct scsi_pkt *pkt = CMD2PKT(cmd);
3267 3323
3268 3324 NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3269 3325
3270 3326 /*
3271 3327 * Reinitialize some fields that need it; the packet may
3272 3328 * have been resubmitted
3273 3329 */
3274 3330 pkt->pkt_reason = CMD_CMPLT;
3275 3331 pkt->pkt_state = 0;
3276 3332 pkt->pkt_statistics = 0;
3277 3333 pkt->pkt_resid = 0;
3278 3334 cmd->cmd_age = 0;
3279 3335 cmd->cmd_pkt_flags = pkt->pkt_flags;
3280 3336
3281 3337 /*
3282 3338 * zero status byte.
3283 3339 */
3284 3340 *(pkt->pkt_scbp) = 0;
3285 3341
3286 3342 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3287 3343 pkt->pkt_resid = cmd->cmd_dmacount;
3288 3344
3289 3345 /*
3290 3346 * consistent packets need to be sync'ed first
3291 3347 * (only for data going out)
3292 3348 */
3293 3349 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3294 3350 (cmd->cmd_flags & CFLAG_DMASEND)) {
3295 3351 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3296 3352 DDI_DMA_SYNC_FORDEV);
3297 3353 }
3298 3354 }
3299 3355
3300 3356 cmd->cmd_flags =
3301 3357 (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3302 3358 CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3303 3359
3304 3360 return (TRAN_ACCEPT);
3305 3361 }
3306 3362
3307 3363 /*
3308 3364 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3309 3365 *
3310 3366 * One of three possibilities:
3311 3367 * - allocate scsi_pkt
3312 3368 * - allocate scsi_pkt and DMA resources
↓ open down ↓ |
47 lines elided |
↑ open up ↑ |
3313 3369 * - allocate DMA resources to an already-allocated pkt
3314 3370 */
3315 3371 static struct scsi_pkt *
3316 3372 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3317 3373 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3318 3374 int (*callback)(), caddr_t arg)
3319 3375 {
3320 3376 mptsas_cmd_t *cmd, *new_cmd;
3321 3377 mptsas_t *mpt = ADDR2MPT(ap);
3322 3378 int failure = 1;
3323 -#ifndef __sparc
3324 3379 uint_t oldcookiec;
3325 -#endif /* __sparc */
3326 3380 mptsas_target_t *ptgt = NULL;
3327 3381 int rval;
3328 3382 mptsas_tgt_private_t *tgt_private;
3329 3383 int kf;
3330 3384
3331 3385 kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3332 3386
3333 3387 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3334 3388 tran_tgt_private;
3335 3389 ASSERT(tgt_private != NULL);
3336 3390 if (tgt_private == NULL) {
3337 3391 return (NULL);
3338 3392 }
3339 3393 ptgt = tgt_private->t_private;
3340 3394 ASSERT(ptgt != NULL);
3341 3395 if (ptgt == NULL)
3342 3396 return (NULL);
3343 3397 ap->a_target = ptgt->m_devhdl;
3344 3398 ap->a_lun = tgt_private->t_lun;
3345 3399
3346 3400 ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3347 3401 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3348 3402 statuslen *= 100; tgtlen *= 4;
3349 3403 #endif
3350 3404 NDBG3(("mptsas_scsi_init_pkt:\n"
3351 3405 "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3352 3406 ap->a_target, (void *)pkt, (void *)bp,
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
3353 3407 cmdlen, statuslen, tgtlen, flags));
3354 3408
3355 3409 /*
3356 3410 * Allocate the new packet.
3357 3411 */
3358 3412 if (pkt == NULL) {
3359 3413 ddi_dma_handle_t save_dma_handle;
3360 3414 ddi_dma_handle_t save_arq_dma_handle;
3361 3415 struct buf *save_arq_bp;
3362 3416 ddi_dma_cookie_t save_arqcookie;
3363 -#ifdef __sparc
3364 - mptti_t *save_sg;
3365 -#endif /* __sparc */
3366 3417
3367 3418 cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3368 3419
3369 3420 if (cmd) {
3370 3421 save_dma_handle = cmd->cmd_dmahandle;
3371 3422 save_arq_dma_handle = cmd->cmd_arqhandle;
3372 3423 save_arq_bp = cmd->cmd_arq_buf;
3373 3424 save_arqcookie = cmd->cmd_arqcookie;
3374 -#ifdef __sparc
3375 - save_sg = cmd->cmd_sg;
3376 -#endif /* __sparc */
3377 3425 bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3378 3426 cmd->cmd_dmahandle = save_dma_handle;
3379 3427 cmd->cmd_arqhandle = save_arq_dma_handle;
3380 3428 cmd->cmd_arq_buf = save_arq_bp;
3381 3429 cmd->cmd_arqcookie = save_arqcookie;
3382 -#ifdef __sparc
3383 - cmd->cmd_sg = save_sg;
3384 -#endif /* __sparc */
3430 +
3385 3431 pkt = (void *)((uchar_t *)cmd +
3386 3432 sizeof (struct mptsas_cmd));
3387 3433 pkt->pkt_ha_private = (opaque_t)cmd;
3388 3434 pkt->pkt_address = *ap;
3389 3435 pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3390 3436 pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3391 3437 pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3392 3438 cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3393 3439 cmd->cmd_cdblen = (uchar_t)cmdlen;
3394 3440 cmd->cmd_scblen = statuslen;
3395 3441 cmd->cmd_rqslen = SENSE_LENGTH;
3396 3442 cmd->cmd_tgt_addr = ptgt;
3397 3443 failure = 0;
3398 3444 }
3399 3445
3400 3446 if (failure || (cmdlen > sizeof (cmd->cmd_cdb)) ||
3401 3447 (tgtlen > PKT_PRIV_LEN) ||
3402 3448 (statuslen > EXTCMDS_STATUS_SIZE)) {
3403 3449 if (failure == 0) {
3404 3450 /*
3405 3451 * if extern alloc fails, all will be
3406 3452 * deallocated, including cmd
3407 3453 */
3408 3454 failure = mptsas_pkt_alloc_extern(mpt, cmd,
3409 3455 cmdlen, tgtlen, statuslen, kf);
3410 3456 }
3411 3457 if (failure) {
3412 3458 /*
3413 3459 * if extern allocation fails, it will
3414 3460 * deallocate the new pkt as well
3415 3461 */
3416 3462 return (NULL);
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
3417 3463 }
3418 3464 }
3419 3465 new_cmd = cmd;
3420 3466
3421 3467 } else {
3422 3468 cmd = PKT2CMD(pkt);
3423 3469 new_cmd = NULL;
3424 3470 }
3425 3471
3426 3472
3427 -#ifndef __sparc
3428 3473 /* grab cmd->cmd_cookiec here as oldcookiec */
3429 3474
3430 3475 oldcookiec = cmd->cmd_cookiec;
3431 -#endif /* __sparc */
3432 3476
3433 3477 /*
3434 3478 * If the dma was broken up into PARTIAL transfers cmd_nwin will be
3435 3479 * greater than 0 and we'll need to grab the next dma window
3436 3480 */
3437 3481 /*
3438 3482 * SLM-not doing extra command frame right now; may add later
3439 3483 */
3440 3484
3441 3485 if (cmd->cmd_nwin > 0) {
3442 3486
3443 3487 /*
3444 3488 			 * Make sure we haven't gone past the total number
3445 3489 * of windows
3446 3490 */
3447 3491 if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3448 3492 return (NULL);
3449 3493 }
3450 3494 if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3451 3495 &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3452 3496 &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3453 3497 return (NULL);
3454 3498 }
3455 3499 goto get_dma_cookies;
3456 3500 }
3457 3501
3458 3502
3459 3503 if (flags & PKT_XARQ) {
3460 3504 cmd->cmd_flags |= CFLAG_XARQ;
3461 3505 }
3462 3506
3463 3507 /*
3464 3508 * DMA resource allocation. This version assumes your
3465 3509 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3466 3510 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3467 3511 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3468 3512 */
3469 3513 if (bp && (bp->b_bcount != 0) &&
3470 3514 (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3471 3515
3472 3516 int cnt, dma_flags;
3473 3517 mptti_t *dmap; /* ptr to the S/G list */
3474 3518
3475 3519 /*
3476 3520 * Set up DMA memory and position to the next DMA segment.
3477 3521 */
3478 3522 ASSERT(cmd->cmd_dmahandle != NULL);
3479 3523
3480 3524 if (bp->b_flags & B_READ) {
3481 3525 dma_flags = DDI_DMA_READ;
3482 3526 cmd->cmd_flags &= ~CFLAG_DMASEND;
3483 3527 } else {
3484 3528 dma_flags = DDI_DMA_WRITE;
3485 3529 cmd->cmd_flags |= CFLAG_DMASEND;
3486 3530 }
3487 3531 if (flags & PKT_CONSISTENT) {
3488 3532 cmd->cmd_flags |= CFLAG_CMDIOPB;
3489 3533 dma_flags |= DDI_DMA_CONSISTENT;
3490 3534 }
3491 3535
3492 3536 if (flags & PKT_DMA_PARTIAL) {
3493 3537 dma_flags |= DDI_DMA_PARTIAL;
3494 3538 }
3495 3539
3496 3540 /*
3497 3541 * workaround for byte hole issue on psycho and
3498 3542 * schizo pre 2.1
3499 3543 */
3500 3544 if ((bp->b_flags & B_READ) && ((bp->b_flags &
3501 3545 (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3502 3546 ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3503 3547 dma_flags |= DDI_DMA_CONSISTENT;
3504 3548 }
3505 3549
3506 3550 rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3507 3551 dma_flags, callback, arg,
3508 3552 &cmd->cmd_cookie, &cmd->cmd_cookiec);
3509 3553 if (rval == DDI_DMA_PARTIAL_MAP) {
3510 3554 (void) ddi_dma_numwin(cmd->cmd_dmahandle,
3511 3555 &cmd->cmd_nwin);
3512 3556 cmd->cmd_winindex = 0;
3513 3557 (void) ddi_dma_getwin(cmd->cmd_dmahandle,
3514 3558 cmd->cmd_winindex, &cmd->cmd_dma_offset,
3515 3559 &cmd->cmd_dma_len, &cmd->cmd_cookie,
3516 3560 &cmd->cmd_cookiec);
3517 3561 } else if (rval && (rval != DDI_DMA_MAPPED)) {
3518 3562 switch (rval) {
3519 3563 case DDI_DMA_NORESOURCES:
3520 3564 bioerror(bp, 0);
3521 3565 break;
3522 3566 case DDI_DMA_BADATTR:
3523 3567 case DDI_DMA_NOMAPPING:
3524 3568 bioerror(bp, EFAULT);
3525 3569 break;
3526 3570 case DDI_DMA_TOOBIG:
3527 3571 default:
3528 3572 bioerror(bp, EINVAL);
3529 3573 break;
3530 3574 }
3531 3575 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3532 3576 if (new_cmd) {
3533 3577 mptsas_scsi_destroy_pkt(ap, pkt);
3534 3578 }
3535 3579 return ((struct scsi_pkt *)NULL);
3536 3580 }
3537 3581
3538 3582 get_dma_cookies:
3539 3583 cmd->cmd_flags |= CFLAG_DMAVALID;
3540 3584 ASSERT(cmd->cmd_cookiec > 0);
3541 3585
3542 3586 if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3543 3587 mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3544 3588 cmd->cmd_cookiec);
3545 3589 bioerror(bp, EINVAL);
3546 3590 if (new_cmd) {
3547 3591 mptsas_scsi_destroy_pkt(ap, pkt);
3548 3592 }
3549 3593 return ((struct scsi_pkt *)NULL);
3550 3594 }
3551 3595
3552 3596 /*
3553 3597 * Allocate extra SGL buffer if needed.
3554 3598 */
3555 3599 if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
3556 3600 (cmd->cmd_extra_frames == NULL)) {
3557 3601 if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
3558 3602 DDI_FAILURE) {
3559 3603 mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
3560 3604 "failed");
3561 3605 bioerror(bp, ENOMEM);
3562 3606 if (new_cmd) {
3563 3607 mptsas_scsi_destroy_pkt(ap, pkt);
3564 3608 }
3565 3609 return ((struct scsi_pkt *)NULL);
3566 3610 }
3567 3611 }
3568 3612
3569 3613 /*
3570 3614 * Always use scatter-gather transfer
↓ open down ↓ |
129 lines elided |
↑ open up ↑ |
3571 3615 * Use the loop below to store physical addresses of
3572 3616 * DMA segments, from the DMA cookies, into your HBA's
3573 3617 * scatter-gather list.
3574 3618 * We need to ensure we have enough kmem alloc'd
3575 3619 * for the sg entries since we are no longer using an
3576 3620 * array inside mptsas_cmd_t.
3577 3621 *
3578 3622 * We check cmd->cmd_cookiec against oldcookiec so
3579 3623 * the scatter-gather list is correctly allocated
3580 3624 */
3581 -#ifndef __sparc
3625 +
3582 3626 if (oldcookiec != cmd->cmd_cookiec) {
3583 3627 if (cmd->cmd_sg != (mptti_t *)NULL) {
3584 3628 kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
3585 3629 oldcookiec);
3586 3630 cmd->cmd_sg = NULL;
3587 3631 }
3588 3632 }
3589 3633
3590 3634 if (cmd->cmd_sg == (mptti_t *)NULL) {
3591 3635 cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3592 3636 cmd->cmd_cookiec), kf);
3593 3637
3594 3638 if (cmd->cmd_sg == (mptti_t *)NULL) {
3595 3639 mptsas_log(mpt, CE_WARN,
3596 3640 "unable to kmem_alloc enough memory "
3597 3641 "for scatter/gather list");
3598 3642 /*
3599 3643 * if we have an ENOMEM condition we need to behave
↓ open down ↓ |
8 lines elided |
↑ open up ↑ |
3600 3644 * the same way as the rest of this routine
3601 3645 */
3602 3646
3603 3647 bioerror(bp, ENOMEM);
3604 3648 if (new_cmd) {
3605 3649 mptsas_scsi_destroy_pkt(ap, pkt);
3606 3650 }
3607 3651 return ((struct scsi_pkt *)NULL);
3608 3652 }
3609 3653 }
3610 -#endif /* __sparc */
3654 +
3611 3655 dmap = cmd->cmd_sg;
3612 3656
3613 3657 ASSERT(cmd->cmd_cookie.dmac_size != 0);
3614 3658
3615 3659 /*
3616 3660 * store the first segment into the S/G list
3617 3661 */
3618 3662 dmap->count = cmd->cmd_cookie.dmac_size;
3619 3663 dmap->addr.address64.Low = (uint32_t)
3620 3664 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3621 3665 dmap->addr.address64.High = (uint32_t)
3622 3666 (cmd->cmd_cookie.dmac_laddress >> 32);
3623 3667
3624 3668 /*
3625 3669 * dmacount counts the size of the dma for this window
3626 3670 * (if partial dma is being used). totaldmacount
3627 3671 * keeps track of the total amount of dma we have
3628 3672 * transferred for all the windows (needed to calculate
3629 3673 * the resid value below).
3630 3674 */
3631 3675 cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
3632 3676 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3633 3677
3634 3678 /*
3635 3679 * We already stored the first DMA scatter gather segment,
3636 3680 * start at 1 if we need to store more.
3637 3681 */
3638 3682 for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
3639 3683 /*
3640 3684 * Get next DMA cookie
3641 3685 */
3642 3686 ddi_dma_nextcookie(cmd->cmd_dmahandle,
3643 3687 &cmd->cmd_cookie);
3644 3688 dmap++;
3645 3689
3646 3690 cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
3647 3691 cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
3648 3692
3649 3693 /*
3650 3694 * store the segment parms into the S/G list
3651 3695 */
3652 3696 dmap->count = cmd->cmd_cookie.dmac_size;
3653 3697 dmap->addr.address64.Low = (uint32_t)
3654 3698 (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
3655 3699 dmap->addr.address64.High = (uint32_t)
3656 3700 (cmd->cmd_cookie.dmac_laddress >> 32);
3657 3701 }
3658 3702
3659 3703 /*
3660 3704 		 * If this was partially allocated we set the resid to
3661 3705 * the amount of data NOT transferred in this window
3662 3706 * If there is only one window, the resid will be 0
3663 3707 */
3664 3708 pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
3665 3709 NDBG16(("mptsas_dmaget: cmd_dmacount=%d.", cmd->cmd_dmacount));
3666 3710 }
3667 3711 return (pkt);
3668 3712 }
3669 3713
3670 3714 /*
3671 3715 * tran_destroy_pkt(9E) - scsi_pkt(9s) deallocation
3672 3716 *
3673 3717 * Notes:
3674 3718 * - also frees DMA resources if allocated
3675 3719  *	- implicit DMA synchronization
3676 3720 */
3677 3721 static void
3678 3722 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
3679 3723 {
↓ open down ↓ |
59 lines elided |
↑ open up ↑ |
3680 3724 mptsas_cmd_t *cmd = PKT2CMD(pkt);
3681 3725 mptsas_t *mpt = ADDR2MPT(ap);
3682 3726
3683 3727 NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
3684 3728 ap->a_target, (void *)pkt));
3685 3729
3686 3730 if (cmd->cmd_flags & CFLAG_DMAVALID) {
3687 3731 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
3688 3732 cmd->cmd_flags &= ~CFLAG_DMAVALID;
3689 3733 }
3690 -#ifndef __sparc
3734 +
3691 3735 if (cmd->cmd_sg) {
3692 3736 kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
3693 3737 cmd->cmd_sg = NULL;
3694 3738 }
3695 -#endif /* __sparc */
3739 +
3696 3740 mptsas_free_extra_sgl_frame(mpt, cmd);
3697 3741
3698 3742 if ((cmd->cmd_flags &
3699 3743 (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
3700 3744 CFLAG_SCBEXTERN)) == 0) {
3701 3745 cmd->cmd_flags = CFLAG_FREE;
3702 3746 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
3703 3747 } else {
3704 3748 mptsas_pkt_destroy_extern(mpt, cmd);
3705 3749 }
3706 3750 }
3707 3751
3708 3752 /*
3709 3753 * kmem cache constructor and destructor:
3710 3754 * When constructing, we bzero the cmd and allocate the dma handle
3711 3755 * When destructing, just free the dma handle
3712 3756 */
3713 3757 static int
3714 3758 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
3715 3759 {
3716 3760 mptsas_cmd_t *cmd = buf;
3717 3761 mptsas_t *mpt = cdrarg;
3718 3762 struct scsi_address ap;
3719 3763 uint_t cookiec;
3720 3764 ddi_dma_attr_t arq_dma_attr;
3721 3765 int (*callback)(caddr_t);
3722 3766
3723 3767 callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3724 3768
3725 3769 NDBG4(("mptsas_kmem_cache_constructor"));
3726 3770
3727 3771 ap.a_hba_tran = mpt->m_tran;
3728 3772 ap.a_target = 0;
3729 3773 ap.a_lun = 0;
3730 3774
3731 3775 /*
3732 3776 * allocate a dma handle
3733 3777 */
3734 3778 if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
3735 3779 NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
3736 3780 cmd->cmd_dmahandle = NULL;
3737 3781 return (-1);
3738 3782 }
3739 3783
3740 3784 cmd->cmd_arq_buf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
3741 3785 SENSE_LENGTH, B_READ, callback, NULL);
3742 3786 if (cmd->cmd_arq_buf == NULL) {
3743 3787 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3744 3788 cmd->cmd_dmahandle = NULL;
3745 3789 return (-1);
3746 3790 }
3747 3791
3748 3792 /*
3749 3793 	 * allocate an arq handle
3750 3794 */
3751 3795 arq_dma_attr = mpt->m_msg_dma_attr;
3752 3796 arq_dma_attr.dma_attr_sgllen = 1;
3753 3797 if ((ddi_dma_alloc_handle(mpt->m_dip, &arq_dma_attr, callback,
3754 3798 NULL, &cmd->cmd_arqhandle)) != DDI_SUCCESS) {
3755 3799 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3756 3800 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3757 3801 cmd->cmd_dmahandle = NULL;
3758 3802 cmd->cmd_arqhandle = NULL;
3759 3803 return (-1);
3760 3804 }
3761 3805
3762 3806 if (ddi_dma_buf_bind_handle(cmd->cmd_arqhandle,
↓ open down ↓ |
57 lines elided |
↑ open up ↑ |
3763 3807 cmd->cmd_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3764 3808 callback, NULL, &cmd->cmd_arqcookie, &cookiec) != DDI_SUCCESS) {
3765 3809 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3766 3810 ddi_dma_free_handle(&cmd->cmd_arqhandle);
3767 3811 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3768 3812 cmd->cmd_dmahandle = NULL;
3769 3813 cmd->cmd_arqhandle = NULL;
3770 3814 cmd->cmd_arq_buf = NULL;
3771 3815 return (-1);
3772 3816 }
3773 - /*
3774 - * In sparc, the sgl length in most of the cases would be 1, so we
3775 - * pre-allocate it in cache. On x86, the max number would be 256,
3776 - * pre-allocate a maximum would waste a lot of memory especially
3777 - * when many cmds are put onto waitq.
3778 - */
3779 -#ifdef __sparc
3780 - cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
3781 - MPTSAS_MAX_CMD_SEGS), KM_SLEEP);
3782 -#endif /* __sparc */
3783 3817
3784 3818 return (0);
3785 3819 }
3786 3820
3787 3821 static void
3788 3822 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
3789 3823 {
3790 3824 #ifndef __lock_lint
3791 3825 _NOTE(ARGUNUSED(cdrarg))
3792 3826 #endif
3793 3827 mptsas_cmd_t *cmd = buf;
3794 3828
3795 3829 NDBG4(("mptsas_kmem_cache_destructor"));
3796 3830
3797 3831 if (cmd->cmd_arqhandle) {
3798 3832 (void) ddi_dma_unbind_handle(cmd->cmd_arqhandle);
3799 3833 ddi_dma_free_handle(&cmd->cmd_arqhandle);
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
3800 3834 cmd->cmd_arqhandle = NULL;
3801 3835 }
3802 3836 if (cmd->cmd_arq_buf) {
3803 3837 scsi_free_consistent_buf(cmd->cmd_arq_buf);
3804 3838 cmd->cmd_arq_buf = NULL;
3805 3839 }
3806 3840 if (cmd->cmd_dmahandle) {
3807 3841 ddi_dma_free_handle(&cmd->cmd_dmahandle);
3808 3842 cmd->cmd_dmahandle = NULL;
3809 3843 }
3810 -#ifdef __sparc
3811 - if (cmd->cmd_sg) {
3812 - kmem_free(cmd->cmd_sg, sizeof (mptti_t)* MPTSAS_MAX_CMD_SEGS);
3813 - cmd->cmd_sg = NULL;
3814 - }
3815 -#endif /* __sparc */
3816 3844 }
3817 3845
3818 3846 static int
3819 3847 mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
3820 3848 {
3821 3849 mptsas_cache_frames_t *p = buf;
3822 3850 mptsas_t *mpt = cdrarg;
3823 3851 ddi_dma_attr_t frame_dma_attr;
3824 3852 size_t mem_size, alloc_len;
3825 3853 ddi_dma_cookie_t cookie;
3826 3854 uint_t ncookie;
3827 3855 int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
3828 3856 ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
3829 3857
3830 3858 frame_dma_attr = mpt->m_msg_dma_attr;
3831 3859 frame_dma_attr.dma_attr_align = 0x10;
3832 3860 frame_dma_attr.dma_attr_sgllen = 1;
3833 3861
3834 3862 if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
3835 3863 &p->m_dma_hdl) != DDI_SUCCESS) {
3836 3864 mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
3837 3865 " extra SGL.");
3838 3866 return (DDI_FAILURE);
3839 3867 }
3840 3868
3841 3869 mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;
3842 3870
3843 3871 if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
3844 3872 DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
3845 3873 &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
3846 3874 ddi_dma_free_handle(&p->m_dma_hdl);
3847 3875 p->m_dma_hdl = NULL;
3848 3876 mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
3849 3877 " extra SGL.");
3850 3878 return (DDI_FAILURE);
3851 3879 }
3852 3880
3853 3881 if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
3854 3882 alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
3855 3883 &cookie, &ncookie) != DDI_DMA_MAPPED) {
3856 3884 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3857 3885 ddi_dma_free_handle(&p->m_dma_hdl);
3858 3886 p->m_dma_hdl = NULL;
3859 3887 mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
3860 3888 " extra SGL");
3861 3889 return (DDI_FAILURE);
3862 3890 }
3863 3891
3864 3892 /*
3865 3893 * Store the SGL memory address. This chip uses this
3866 3894 * address to dma to and from the driver. The second
3867 3895 * address is the address mpt uses to fill in the SGL.
3868 3896 */
3869 3897 p->m_phys_addr = cookie.dmac_address;
3870 3898
3871 3899 return (DDI_SUCCESS);
3872 3900 }
3873 3901
3874 3902 static void
3875 3903 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
3876 3904 {
3877 3905 #ifndef __lock_lint
3878 3906 _NOTE(ARGUNUSED(cdrarg))
3879 3907 #endif
3880 3908 mptsas_cache_frames_t *p = buf;
3881 3909 if (p->m_dma_hdl != NULL) {
3882 3910 (void) ddi_dma_unbind_handle(p->m_dma_hdl);
3883 3911 (void) ddi_dma_mem_free(&p->m_acc_hdl);
3884 3912 ddi_dma_free_handle(&p->m_dma_hdl);
3885 3913 p->m_phys_addr = NULL;
3886 3914 p->m_frames_addr = NULL;
3887 3915 p->m_dma_hdl = NULL;
3888 3916 p->m_acc_hdl = NULL;
3889 3917 }
3890 3918
3891 3919 }
3892 3920
3893 3921 /*
3894 3922 * allocate and deallocate external pkt space (ie. not part of mptsas_cmd)
3895 3923 * for non-standard length cdb, pkt_private, status areas
3896 3924 * if allocation fails, then deallocate all external space and the pkt
3897 3925 */
3898 3926 /* ARGSUSED */
3899 3927 static int
3900 3928 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
3901 3929 int cmdlen, int tgtlen, int statuslen, int kf)
3902 3930 {
3903 3931 caddr_t cdbp, scbp, tgt;
3904 3932 int (*callback)(caddr_t) = (kf == KM_SLEEP) ?
3905 3933 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
3906 3934 struct scsi_address ap;
3907 3935 size_t senselength;
3908 3936 ddi_dma_attr_t ext_arq_dma_attr;
3909 3937 uint_t cookiec;
3910 3938
3911 3939 NDBG3(("mptsas_pkt_alloc_extern: "
3912 3940 "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
3913 3941 (void *)cmd, cmdlen, tgtlen, statuslen, kf));
3914 3942
3915 3943 tgt = cdbp = scbp = NULL;
3916 3944 cmd->cmd_scblen = statuslen;
3917 3945 cmd->cmd_privlen = (uchar_t)tgtlen;
3918 3946
3919 3947 if (cmdlen > sizeof (cmd->cmd_cdb)) {
3920 3948 if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
3921 3949 goto fail;
3922 3950 }
3923 3951 cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
3924 3952 cmd->cmd_flags |= CFLAG_CDBEXTERN;
3925 3953 }
3926 3954 if (tgtlen > PKT_PRIV_LEN) {
3927 3955 if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
3928 3956 goto fail;
3929 3957 }
3930 3958 cmd->cmd_flags |= CFLAG_PRIVEXTERN;
3931 3959 cmd->cmd_pkt->pkt_private = tgt;
3932 3960 }
3933 3961 if (statuslen > EXTCMDS_STATUS_SIZE) {
3934 3962 if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
3935 3963 goto fail;
3936 3964 }
3937 3965 cmd->cmd_flags |= CFLAG_SCBEXTERN;
3938 3966 cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
3939 3967
3940 3968 /* allocate sense data buf for DMA */
3941 3969
3942 3970 senselength = statuslen - MPTSAS_GET_ITEM_OFF(
3943 3971 struct scsi_arq_status, sts_sensedata);
3944 3972 cmd->cmd_rqslen = (uchar_t)senselength;
3945 3973
3946 3974 ap.a_hba_tran = mpt->m_tran;
3947 3975 ap.a_target = 0;
3948 3976 ap.a_lun = 0;
3949 3977
3950 3978 cmd->cmd_ext_arq_buf = scsi_alloc_consistent_buf(&ap,
3951 3979 (struct buf *)NULL, senselength, B_READ,
3952 3980 callback, NULL);
3953 3981
3954 3982 if (cmd->cmd_ext_arq_buf == NULL) {
3955 3983 goto fail;
3956 3984 }
3957 3985 /*
3958 3986 		 * allocate an extern arq handle and bind the buf
3959 3987 */
3960 3988 ext_arq_dma_attr = mpt->m_msg_dma_attr;
3961 3989 ext_arq_dma_attr.dma_attr_sgllen = 1;
3962 3990 if ((ddi_dma_alloc_handle(mpt->m_dip,
3963 3991 &ext_arq_dma_attr, callback,
3964 3992 NULL, &cmd->cmd_ext_arqhandle)) != DDI_SUCCESS) {
3965 3993 goto fail;
3966 3994 }
3967 3995
3968 3996 if (ddi_dma_buf_bind_handle(cmd->cmd_ext_arqhandle,
3969 3997 cmd->cmd_ext_arq_buf, (DDI_DMA_READ | DDI_DMA_CONSISTENT),
3970 3998 callback, NULL, &cmd->cmd_ext_arqcookie,
3971 3999 &cookiec)
3972 4000 != DDI_SUCCESS) {
3973 4001 goto fail;
3974 4002 }
3975 4003 cmd->cmd_flags |= CFLAG_EXTARQBUFVALID;
3976 4004 }
3977 4005 return (0);
3978 4006 fail:
3979 4007 mptsas_pkt_destroy_extern(mpt, cmd);
3980 4008 return (1);
3981 4009 }
3982 4010
3983 4011 /*
3984 4012 * deallocate external pkt space and deallocate the pkt
3985 4013 */
3986 4014 static void
3987 4015 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
3988 4016 {
3989 4017 NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
3990 4018
3991 4019 if (cmd->cmd_flags & CFLAG_FREE) {
3992 4020 mptsas_log(mpt, CE_PANIC,
3993 4021 "mptsas_pkt_destroy_extern: freeing free packet");
3994 4022 _NOTE(NOT_REACHED)
3995 4023 /* NOTREACHED */
3996 4024 }
3997 4025 if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
3998 4026 kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
3999 4027 }
4000 4028 if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4001 4029 kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4002 4030 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4003 4031 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4004 4032 }
4005 4033 if (cmd->cmd_ext_arqhandle) {
4006 4034 ddi_dma_free_handle(&cmd->cmd_ext_arqhandle);
4007 4035 cmd->cmd_ext_arqhandle = NULL;
4008 4036 }
4009 4037 if (cmd->cmd_ext_arq_buf)
4010 4038 scsi_free_consistent_buf(cmd->cmd_ext_arq_buf);
4011 4039 }
4012 4040 if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4013 4041 kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4014 4042 }
4015 4043 cmd->cmd_flags = CFLAG_FREE;
4016 4044 kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4017 4045 }
4018 4046
4019 4047 /*
4020 4048 * tran_sync_pkt(9E) - explicit DMA synchronization
4021 4049 */
4022 4050 /*ARGSUSED*/
4023 4051 static void
4024 4052 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4025 4053 {
4026 4054 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4027 4055
4028 4056 NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4029 4057 ap->a_target, (void *)pkt));
4030 4058
4031 4059 if (cmd->cmd_dmahandle) {
4032 4060 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4033 4061 (cmd->cmd_flags & CFLAG_DMASEND) ?
4034 4062 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4035 4063 }
4036 4064 }
4037 4065
4038 4066 /*
4039 4067 * tran_dmafree(9E) - deallocate DMA resources allocated for command
4040 4068 */
4041 4069 /*ARGSUSED*/
4042 4070 static void
4043 4071 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4044 4072 {
4045 4073 mptsas_cmd_t *cmd = PKT2CMD(pkt);
4046 4074 mptsas_t *mpt = ADDR2MPT(ap);
4047 4075
4048 4076 NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4049 4077 ap->a_target, (void *)pkt));
4050 4078
4051 4079 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4052 4080 (void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4053 4081 cmd->cmd_flags &= ~CFLAG_DMAVALID;
4054 4082 }
4055 4083
4056 4084 if (cmd->cmd_flags & CFLAG_EXTARQBUFVALID) {
4057 4085 (void) ddi_dma_unbind_handle(cmd->cmd_ext_arqhandle);
4058 4086 cmd->cmd_flags &= ~CFLAG_EXTARQBUFVALID;
4059 4087 }
4060 4088
4061 4089 mptsas_free_extra_sgl_frame(mpt, cmd);
4062 4090 }
4063 4091
4064 4092 static void
4065 4093 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4066 4094 {
4067 4095 if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4068 4096 (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4069 4097 (void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4070 4098 DDI_DMA_SYNC_FORCPU);
4071 4099 }
4072 4100 (*pkt->pkt_comp)(pkt);
4073 4101 }
4074 4102
4075 4103 static void
4076 4104 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
4077 4105 pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4078 4106 {
4079 4107 uint_t cookiec;
4080 4108 mptti_t *dmap;
4081 4109 uint32_t flags;
4082 4110 pMpi2SGESimple64_t sge;
4083 4111 pMpi2SGEChain64_t sgechain;
4084 4112 ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
4085 4113
4086 4114 /*
4087 4115 * Save the number of entries in the DMA
4088 4116 * Scatter/Gather list
4089 4117 */
4090 4118 cookiec = cmd->cmd_cookiec;
4091 4119
4092 4120 NDBG1(("mptsas_sge_setup: cookiec=%d", cookiec));
4093 4121
4094 4122 /*
4095 4123 * Set read/write bit in control.
4096 4124 */
4097 4125 if (cmd->cmd_flags & CFLAG_DMASEND) {
4098 4126 *control |= MPI2_SCSIIO_CONTROL_WRITE;
4099 4127 } else {
4100 4128 *control |= MPI2_SCSIIO_CONTROL_READ;
4101 4129 }
4102 4130
4103 4131 ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
4104 4132
4105 4133 /*
4106 4134 * We have 2 cases here. First where we can fit all the
4107 4135 * SG elements into the main frame, and the case
4108 4136 * where we can't.
4109 4137 * If we have more cookies than we can attach to a frame
4110 4138 * we will need to use a chain element to point
4111 4139 * a location of memory where the rest of the S/G
4112 4140 * elements reside.
4113 4141 */
4114 4142 if (cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
4115 4143 dmap = cmd->cmd_sg;
4116 4144 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4117 4145 while (cookiec--) {
4118 4146 ddi_put32(acc_hdl,
4119 4147 &sge->Address.Low, dmap->addr.address64.Low);
4120 4148 ddi_put32(acc_hdl,
4121 4149 &sge->Address.High, dmap->addr.address64.High);
4122 4150 ddi_put32(acc_hdl, &sge->FlagsLength,
4123 4151 dmap->count);
4124 4152 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4125 4153 flags |= ((uint32_t)
4126 4154 (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4127 4155 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4128 4156 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4129 4157 MPI2_SGE_FLAGS_SHIFT);
4130 4158
4131 4159 /*
4132 4160 * If this is the last cookie, we set the flags
4133 4161 * to indicate so
4134 4162 */
4135 4163 if (cookiec == 0) {
4136 4164 flags |=
4137 4165 ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
4138 4166 | MPI2_SGE_FLAGS_END_OF_BUFFER
4139 4167 | MPI2_SGE_FLAGS_END_OF_LIST) <<
4140 4168 MPI2_SGE_FLAGS_SHIFT);
4141 4169 }
4142 4170 if (cmd->cmd_flags & CFLAG_DMASEND) {
4143 4171 flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4144 4172 MPI2_SGE_FLAGS_SHIFT);
4145 4173 } else {
4146 4174 flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4147 4175 MPI2_SGE_FLAGS_SHIFT);
4148 4176 }
4149 4177 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4150 4178 dmap++;
4151 4179 sge++;
4152 4180 }
4153 4181 } else {
4154 4182 /*
4155 4183 * Hereby we start to deal with multiple frames.
4156 4184 * The process is as follows:
4157 4185 * 1. Determine how many frames are needed for SGL element
4158 4186 * storage; Note that all frames are stored in contiguous
4159 4187 * memory space and in 64-bit DMA mode each element is
4160 4188 * 3 double-words (12 bytes) long.
4161 4189 * 2. Fill up the main frame. We need to do this separately
4162 4190 * since it contains the SCSI IO request header and needs
4163 4191 * dedicated processing. Note that the last 4 double-words
4164 4192 * of the SCSI IO header is for SGL element storage
4165 4193 * (MPI2_SGE_IO_UNION).
4166 4194 * 3. Fill the chain element in the main frame, so the DMA
4167 4195 * engine can use the following frames.
4168 4196 * 4. Enter a loop to fill the remaining frames. Note that the
4169 4197 * last frame contains no chain element. The remaining
4170 4198 * frames go into the mpt SGL buffer allocated on the fly,
4171 4199 * not immediately following the main message frame, as in
4172 4200 * Gen1.
4173 4201 * Some restrictions:
4174 4202 * 1. For 64-bit DMA, the simple element and chain element
4175 4203 * are both of 3 double-words (12 bytes) in size, even
4176 4204 * though all frames are stored in the first 4G of mem
4177 4205 * range and the higher 32-bits of the address are always 0.
4178 4206 * 2. On some controllers (like the 1064/1068), a frame can
4179 4207 * hold SGL elements with the last 1 or 2 double-words
4180 4208 * (4 or 8 bytes) un-used. On these controllers, we should
4181 4209 * recognize that there's not enough room for another SGL
4182 4210 * element and move the sge pointer to the next frame.
4183 4211 */
4184 4212 int i, j, k, l, frames, sgemax;
4185 4213 int temp;
4186 4214 uint8_t chainflags;
4187 4215 uint16_t chainlength;
4188 4216 mptsas_cache_frames_t *p;
4189 4217
4190 4218 /*
4191 4219 * Sgemax is the number of SGE's that will fit
4192 4220 * each extra frame and frames is total
4193 4221 * number of frames we'll need. 1 sge entry per
4194 4222 		 * frame is reserved for the chain element thus the -1 below.
4195 4223 */
4196 4224 sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
4197 4225 - 1);
4198 4226 temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4199 4227
4200 4228 /*
4201 4229 * A little check to see if we need to round up the number
4202 4230 * of frames we need
4203 4231 */
4204 4232 if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4205 4233 sgemax) > 1) {
4206 4234 frames = (temp + 1);
4207 4235 } else {
4208 4236 frames = temp;
4209 4237 }
4210 4238 dmap = cmd->cmd_sg;
4211 4239 sge = (pMpi2SGESimple64_t)(&frame->SGL);
4212 4240
4213 4241 /*
4214 4242 * First fill in the main frame
4215 4243 */
4216 4244 for (j = 1; j < MPTSAS_MAX_FRAME_SGES64(mpt); j++) {
4217 4245 ddi_put32(acc_hdl, &sge->Address.Low,
4218 4246 dmap->addr.address64.Low);
4219 4247 ddi_put32(acc_hdl, &sge->Address.High,
4220 4248 dmap->addr.address64.High);
4221 4249 ddi_put32(acc_hdl, &sge->FlagsLength, dmap->count);
4222 4250 flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4223 4251 flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4224 4252 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4225 4253 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4226 4254 MPI2_SGE_FLAGS_SHIFT);
4227 4255
4228 4256 /*
4229 4257 * If this is the last SGE of this frame
4230 4258 * we set the end of list flag
4231 4259 */
4232 4260 if (j == (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) {
4233 4261 flags |= ((uint32_t)
4234 4262 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4235 4263 MPI2_SGE_FLAGS_SHIFT);
4236 4264 }
4237 4265 if (cmd->cmd_flags & CFLAG_DMASEND) {
4238 4266 flags |=
4239 4267 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4240 4268 MPI2_SGE_FLAGS_SHIFT);
4241 4269 } else {
4242 4270 flags |=
4243 4271 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4244 4272 MPI2_SGE_FLAGS_SHIFT);
4245 4273 }
4246 4274 ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4247 4275 dmap++;
4248 4276 sge++;
4249 4277 }
4250 4278
4251 4279 /*
4252 4280 * Fill in the chain element in the main frame.
4253 4281 * About calculation on ChainOffset:
4254 4282 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4255 4283 * in the end reserved for SGL element storage
4256 4284 * (MPI2_SGE_IO_UNION); we should count it in our
4257 4285 * calculation. See its definition in the header file.
4258 4286 * 2. Constant j is the counter of the current SGL element
4259 4287 * that will be processed, and (j - 1) is the number of
4260 4288 * SGL elements that have been processed (stored in the
4261 4289 * main frame).
4262 4290 * 3. ChainOffset value should be in units of double-words (4
4263 4291 * bytes) so the last value should be divided by 4.
4264 4292 */
4265 4293 ddi_put8(acc_hdl, &frame->ChainOffset,
4266 4294 (sizeof (MPI2_SCSI_IO_REQUEST) -
4267 4295 sizeof (MPI2_SGE_IO_UNION) +
4268 4296 (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4269 4297 sgechain = (pMpi2SGEChain64_t)sge;
4270 4298 chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4271 4299 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4272 4300 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4273 4301 ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4274 4302
4275 4303 /*
4276 4304 * The size of the next frame is the accurate size of space
4277 4305 * (in bytes) used to store the SGL elements. j is the counter
4278 4306 * of SGL elements. (j - 1) is the number of SGL elements that
4279 4307 * have been processed (stored in frames).
4280 4308 */
4281 4309 if (frames >= 2) {
4282 4310 chainlength = mpt->m_req_frame_size /
4283 4311 sizeof (MPI2_SGE_SIMPLE64) *
4284 4312 sizeof (MPI2_SGE_SIMPLE64);
4285 4313 } else {
4286 4314 chainlength = ((cookiec - (j - 1)) *
4287 4315 sizeof (MPI2_SGE_SIMPLE64));
4288 4316 }
4289 4317
4290 4318 p = cmd->cmd_extra_frames;
4291 4319
4292 4320 ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4293 4321 ddi_put32(acc_hdl, &sgechain->Address.Low,
4294 4322 p->m_phys_addr);
4295 4323 /* SGL is allocated in the first 4G mem range */
4296 4324 ddi_put32(acc_hdl, &sgechain->Address.High, 0);
4297 4325
4298 4326 /*
4299 4327 * If there are more than 2 frames left we have to
4300 4328 * fill in the next chain offset to the location of
4301 4329 * the chain element in the next frame.
4302 4330 * sgemax is the number of simple elements in an extra
4303 4331 * frame. Note that the value NextChainOffset should be
4304 4332 * in double-words (4 bytes).
4305 4333 */
4306 4334 if (frames >= 2) {
4307 4335 ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4308 4336 (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4309 4337 } else {
4310 4338 ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4311 4339 }
4312 4340
4313 4341 /*
4314 4342 * Jump to next frame;
4315 4343 * Starting here, chain buffers go into the per command SGL.
4316 4344 * This buffer is allocated when chain buffers are needed.
4317 4345 */
4318 4346 sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4319 4347 i = cookiec;
4320 4348
4321 4349 /*
4322 4350 * Start filling in frames with SGE's. If we
4323 4351 * reach the end of frame and still have SGE's
4324 4352 * to fill we need to add a chain element and
4325 4353 * use another frame. j will be our counter
4326 4354 * for what cookie we are at and i will be
4327 4355 * the total cookiec. k is the current frame
4328 4356 */
4329 4357 for (k = 1; k <= frames; k++) {
4330 4358 for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4331 4359
4332 4360 /*
4333 4361 * If we have reached the end of frame
4334 4362 * and we have more SGE's to fill in
4335 4363 * we have to fill the final entry
4336 4364 * with a chain element and then
4337 4365 * continue to the next frame
4338 4366 */
4339 4367 if ((l == (sgemax + 1)) && (k != frames)) {
4340 4368 sgechain = (pMpi2SGEChain64_t)sge;
4341 4369 j--;
4342 4370 chainflags = (
4343 4371 MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4344 4372 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4345 4373 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4346 4374 ddi_put8(p->m_acc_hdl,
4347 4375 &sgechain->Flags, chainflags);
4348 4376 /*
4349 4377 * k is the frame counter and (k + 1)
4350 4378 * is the number of the next frame.
4351 4379 * Note that frames are in contiguous
4352 4380 * memory space.
4353 4381 */
4354 4382 ddi_put32(p->m_acc_hdl,
4355 4383 &sgechain->Address.Low,
4356 4384 (p->m_phys_addr +
4357 4385 (mpt->m_req_frame_size * k)));
4358 4386 ddi_put32(p->m_acc_hdl,
4359 4387 &sgechain->Address.High, 0);
4360 4388
4361 4389 /*
4362 4390 * If there are more than 2 frames left
4363 4391 * we have to next chain offset to
4364 4392 * the location of the chain element
4365 4393 * in the next frame and fill in the
4366 4394 * length of the next chain
4367 4395 */
4368 4396 if ((frames - k) >= 2) {
4369 4397 ddi_put8(p->m_acc_hdl,
4370 4398 &sgechain->NextChainOffset,
4371 4399 (sgemax *
4372 4400 sizeof (MPI2_SGE_SIMPLE64))
4373 4401 >> 2);
4374 4402 ddi_put16(p->m_acc_hdl,
4375 4403 &sgechain->Length,
4376 4404 mpt->m_req_frame_size /
4377 4405 sizeof (MPI2_SGE_SIMPLE64) *
4378 4406 sizeof (MPI2_SGE_SIMPLE64));
4379 4407 } else {
4380 4408 /*
4381 4409 * This is the last frame. Set
4382 4410 * the NextChainOffset to 0 and
4383 4411 * Length is the total size of
4384 4412 * all remaining simple elements
4385 4413 */
4386 4414 ddi_put8(p->m_acc_hdl,
4387 4415 &sgechain->NextChainOffset,
4388 4416 0);
4389 4417 ddi_put16(p->m_acc_hdl,
4390 4418 &sgechain->Length,
4391 4419 (cookiec - j) *
4392 4420 sizeof (MPI2_SGE_SIMPLE64));
4393 4421 }
4394 4422
4395 4423 /* Jump to the next frame */
4396 4424 sge = (pMpi2SGESimple64_t)
4397 4425 ((char *)p->m_frames_addr +
4398 4426 (int)mpt->m_req_frame_size * k);
4399 4427
4400 4428 continue;
4401 4429 }
4402 4430
4403 4431 ddi_put32(p->m_acc_hdl,
4404 4432 &sge->Address.Low,
4405 4433 dmap->addr.address64.Low);
4406 4434 ddi_put32(p->m_acc_hdl,
4407 4435 &sge->Address.High,
4408 4436 dmap->addr.address64.High);
4409 4437 ddi_put32(p->m_acc_hdl,
4410 4438 &sge->FlagsLength, dmap->count);
4411 4439 flags = ddi_get32(p->m_acc_hdl,
4412 4440 &sge->FlagsLength);
4413 4441 flags |= ((uint32_t)(
4414 4442 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4415 4443 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4416 4444 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4417 4445 MPI2_SGE_FLAGS_SHIFT);
4418 4446
4419 4447 /*
4420 4448 * If we are at the end of the frame and
4421 4449 * there is another frame to fill in
4422 4450 * we set the last simple element as last
4423 4451 * element
4424 4452 */
4425 4453 if ((l == sgemax) && (k != frames)) {
4426 4454 flags |= ((uint32_t)
4427 4455 (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4428 4456 MPI2_SGE_FLAGS_SHIFT);
4429 4457 }
4430 4458
4431 4459 /*
4432 4460 * If this is the final cookie we
4433 4461 * indicate it by setting the flags
4434 4462 */
4435 4463 if (j == i) {
4436 4464 flags |= ((uint32_t)
4437 4465 (MPI2_SGE_FLAGS_LAST_ELEMENT |
4438 4466 MPI2_SGE_FLAGS_END_OF_BUFFER |
4439 4467 MPI2_SGE_FLAGS_END_OF_LIST) <<
4440 4468 MPI2_SGE_FLAGS_SHIFT);
4441 4469 }
4442 4470 if (cmd->cmd_flags & CFLAG_DMASEND) {
4443 4471 flags |=
4444 4472 (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4445 4473 MPI2_SGE_FLAGS_SHIFT);
4446 4474 } else {
4447 4475 flags |=
4448 4476 (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4449 4477 MPI2_SGE_FLAGS_SHIFT);
4450 4478 }
4451 4479 ddi_put32(p->m_acc_hdl,
4452 4480 &sge->FlagsLength, flags);
4453 4481 dmap++;
4454 4482 sge++;
4455 4483 }
4456 4484 }
4457 4485
4458 4486 /*
4459 4487 * Sync DMA with the chain buffers that were just created
4460 4488 */
4461 4489 (void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4462 4490 }
4463 4491 }
4464 4492
4465 4493 /*
4466 4494 * Interrupt handling
↓ open down ↓ |
641 lines elided |
↑ open up ↑ |
4467 4495 * Utility routine. Poll for status of a command sent to HBA
4468 4496 * without interrupts (a FLAG_NOINTR command).
4469 4497 */
int
mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
{
	int rval = TRUE;

	NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));

	/*
	 * Task-management commands are started by the caller; anything
	 * else must be kicked off here so the HBA begins processing it.
	 */
	if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
		mptsas_restart_hba(mpt);
	}

	/*
	 * Wait, using drv_usecwait(), long enough for the command to
	 * reasonably return from the target if the target isn't
	 * "dead". A polled command may well be sent from scsi_poll, and
	 * there are retries built in to scsi_poll if the transport
	 * accepted the packet (TRAN_ACCEPT). scsi_poll waits 1 second
	 * and retries the transport up to scsi_poll_busycnt times
	 * (currently 60) if
	 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
	 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
	 *
	 * limit the waiting to avoid a hang in the event that the
	 * cmd never gets started but we are still receiving interrupts.
	 * mptsas_wait_intr() marks the cmd CFLAG_FINISHED when its
	 * reply is processed.
	 */
	while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
		if (mptsas_wait_intr(mpt, polltime) == FALSE) {
			NDBG5(("mptsas_poll: command incomplete"));
			rval = FALSE;
			break;
		}
	}

	if (rval == FALSE) {

		/*
		 * this isn't supposed to happen, the hba must be wedged
		 * Mark this cmd as a timeout.
		 */
		mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
		    (STAT_TIMEOUT|STAT_ABORTED));

		if (poll_cmd->cmd_queued == FALSE) {

			NDBG5(("mptsas_poll: not on waitq"));

			/*
			 * The cmd was handed to the HBA; reflect that in
			 * the pkt state even though it never completed.
			 */
			poll_cmd->cmd_pkt->pkt_state |=
			    (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
		} else {

			/* find and remove it from the waitq */
			NDBG5(("mptsas_poll: delete from waitq"));
			mptsas_waitq_delete(mpt, poll_cmd);
		}

	}
	mptsas_fma_check(mpt, poll_cmd);
	NDBG5(("mptsas_poll: done"));
	return (rval);
}
4552 4558
4553 4559 /*
4554 4560 * Used for polling cmds and TM function
4555 4561 */
/*
 * Used for polling cmds and TM function
 */
static int
mptsas_wait_intr(mptsas_t *mpt, int polltime)
{
	int cnt;
	pMpi2ReplyDescriptorsUnion_t reply_desc_union;
	uint32_t int_mask;

	NDBG5(("mptsas_wait_intr"));

	/*
	 * Flag that replies are being consumed by this polling thread so
	 * the regular interrupt path does not race with us on the reply
	 * post queue.
	 */
	mpt->m_polled_intr = 1;

	/*
	 * Get the current interrupt mask and disable interrupts. When
	 * re-enabling ints, set mask to saved value.
	 */
	int_mask = ddi_get32(mpt->m_datap, &mpt->m_reg->HostInterruptMask);
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Poll for up to 'polltime' iterations; each empty iteration
	 * busy-waits 1000 microseconds, so the total wait is roughly
	 * polltime milliseconds.
	 */
	for (cnt = 0; cnt < polltime; cnt++) {
		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);

		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);

		/*
		 * An all-ones descriptor means the slot is still empty
		 * (unused entries are initialized to 0xFF...FF).
		 */
		if (ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
		    ddi_get32(mpt->m_acc_post_queue_hdl,
		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
			drv_usecwait(1000);
			continue;
		}

		/*
		 * The reply is valid, process it according to its
		 * type.
		 */
		mptsas_process_intr(mpt, reply_desc_union);

		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
			mpt->m_post_index = 0;
		}

		/*
		 * Update the global reply index
		 */
		ddi_put32(mpt->m_datap,
		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
		mpt->m_polled_intr = 0;

		/*
		 * Re-enable interrupts and quit.
		 */
		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
		    int_mask);
		return (TRUE);

	}

	/*
	 * Clear polling flag, re-enable interrupts and quit.
	 */
	mpt->m_polled_intr = 0;
	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
	return (FALSE);
}
4655 4631
4656 -/*
4657 - * For fastpath, the m_intr_mutex should be held from the begining to the end,
4658 - * so we only treat those cmds that need not release m_intr_mutex(even just for
4659 - * a moment) as candidate for fast processing. otherwise, we don't handle them
4660 - * and just return, then in ISR, those cmds would be handled later with m_mutex
4661 - * held and m_intr_mutex not held.
4662 - */
4663 -static int
4664 -mptsas_handle_io_fastpath(mptsas_t *mpt,
4665 - uint16_t SMID)
4666 -{
4667 - mptsas_slots_t *slots = mpt->m_active;
4668 - mptsas_cmd_t *cmd = NULL;
4669 - struct scsi_pkt *pkt;
4670 -
4671 - /*
4672 - * This is a success reply so just complete the IO. First, do a sanity
4673 - * check on the SMID. The final slot is used for TM requests, which
4674 - * would not come into this reply handler.
4675 - */
4676 - if ((SMID == 0) || (SMID > slots->m_n_slots)) {
4677 - mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4678 - SMID);
4679 - ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4680 - return (TRUE);
4681 - }
4682 -
4683 - cmd = slots->m_slot[SMID];
4684 -
4685 - /*
4686 - * print warning and return if the slot is empty
4687 - */
4688 - if (cmd == NULL) {
4689 - mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
4690 - "in slot %d", SMID);
4691 - return (TRUE);
4692 - }
4693 -
4694 - pkt = CMD2PKT(cmd);
4695 - pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
4696 - STATE_GOT_STATUS);
4697 - if (cmd->cmd_flags & CFLAG_DMAVALID) {
4698 - pkt->pkt_state |= STATE_XFERRED_DATA;
4699 - }
4700 - pkt->pkt_resid = 0;
4701 -
4702 - /*
4703 - * If the cmd is a IOC, or a passthrough, then we don't process it in
4704 - * fastpath, and later it would be handled by mptsas_process_intr()
4705 - * with m_mutex protected.
4706 - */
4707 - if (cmd->cmd_flags & (CFLAG_PASSTHRU | CFLAG_CMDIOC)) {
4708 - return (FALSE);
4709 - } else {
4710 - mptsas_remove_cmd0(mpt, cmd);
4711 - }
4712 -
4713 - if (cmd->cmd_flags & CFLAG_RETRY) {
4714 - /*
4715 - * The target returned QFULL or busy, do not add tihs
4716 - * pkt to the doneq since the hba will retry
4717 - * this cmd.
4718 - *
4719 - * The pkt has already been resubmitted in
4720 - * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4721 - * Remove this cmd_flag here.
4722 - */
4723 - cmd->cmd_flags &= ~CFLAG_RETRY;
4724 - } else {
4725 - mptsas_doneq_add0(mpt, cmd);
4726 - }
4727 -
4728 - /*
4729 - * In fastpath, the cmd should only be a context reply, so just check
4730 - * the post queue of the reply descriptor and the dmahandle of the cmd
4731 - * is enough. No sense data in this case and no need to check the dma
4732 - * handle where sense data dma info is saved, the dma handle of the
4733 - * reply frame, and the dma handle of the reply free queue.
4734 - * For the dma handle of the request queue. Check fma here since we
4735 - * are sure the request must have already been sent/DMAed correctly.
4736 - * otherwise checking in mptsas_scsi_start() is not correct since
4737 - * at that time the dma may not start.
4738 - */
4739 - if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
4740 - DDI_SUCCESS) ||
4741 - (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
4742 - DDI_SUCCESS)) {
4743 - ddi_fm_service_impact(mpt->m_dip,
4744 - DDI_SERVICE_UNAFFECTED);
4745 - pkt->pkt_reason = CMD_TRAN_ERR;
4746 - pkt->pkt_statistics = 0;
4747 - }
4748 - if (cmd->cmd_dmahandle &&
4749 - (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
4750 - ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4751 - pkt->pkt_reason = CMD_TRAN_ERR;
4752 - pkt->pkt_statistics = 0;
4753 - }
4754 - if ((cmd->cmd_extra_frames &&
4755 - ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
4756 - DDI_SUCCESS) ||
4757 - (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
4758 - DDI_SUCCESS)))) {
4759 - ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4760 - pkt->pkt_reason = CMD_TRAN_ERR;
4761 - pkt->pkt_statistics = 0;
4762 - }
4763 -
4764 - return (TRUE);
4765 -}
4766 -
4767 4632 static void
4768 4633 mptsas_handle_scsi_io_success(mptsas_t *mpt,
4769 4634 pMpi2ReplyDescriptorsUnion_t reply_desc)
4770 4635 {
4771 4636 pMpi2SCSIIOSuccessReplyDescriptor_t scsi_io_success;
4772 4637 uint16_t SMID;
4773 4638 mptsas_slots_t *slots = mpt->m_active;
4774 4639 mptsas_cmd_t *cmd = NULL;
4775 4640 struct scsi_pkt *pkt;
4776 4641
4642 + ASSERT(mutex_owned(&mpt->m_mutex));
4643 +
4777 4644 scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
4778 - SMID = scsi_io_success->SMID;
4645 + SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
4779 4646
4780 4647 /*
4781 4648 * This is a success reply so just complete the IO. First, do a sanity
4782 4649 * check on the SMID. The final slot is used for TM requests, which
4783 4650 * would not come into this reply handler.
4784 4651 */
4785 4652 if ((SMID == 0) || (SMID > slots->m_n_slots)) {
4786 4653 mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
4787 4654 SMID);
4788 4655 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
4789 4656 return;
4790 4657 }
4791 4658
4792 4659 cmd = slots->m_slot[SMID];
4793 4660
4794 4661 /*
4795 4662 * print warning and return if the slot is empty
4796 4663 */
4797 4664 if (cmd == NULL) {
4798 4665 mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
4799 4666 "in slot %d", SMID);
4800 4667 return;
4801 4668 }
4802 4669
4803 4670 pkt = CMD2PKT(cmd);
4804 4671 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
4805 4672 STATE_GOT_STATUS);
4806 4673 if (cmd->cmd_flags & CFLAG_DMAVALID) {
4807 4674 pkt->pkt_state |= STATE_XFERRED_DATA;
4808 4675 }
4809 4676 pkt->pkt_resid = 0;
4810 4677
4811 4678 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
4812 4679 cmd->cmd_flags |= CFLAG_FINISHED;
4813 4680 cv_broadcast(&mpt->m_passthru_cv);
4814 4681 return;
4815 4682 } else {
4816 4683 mptsas_remove_cmd(mpt, cmd);
4817 4684 }
4818 4685
4819 4686 if (cmd->cmd_flags & CFLAG_RETRY) {
4820 4687 /*
4821 4688 * The target returned QFULL or busy, do not add tihs
4822 4689 * pkt to the doneq since the hba will retry
4823 4690 * this cmd.
4824 4691 *
4825 4692 * The pkt has already been resubmitted in
4826 4693 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
4827 4694 * Remove this cmd_flag here.
4828 4695 */
4829 4696 cmd->cmd_flags &= ~CFLAG_RETRY;
4830 4697 } else {
4831 4698 mptsas_doneq_add(mpt, cmd);
4832 4699 }
4833 4700 }
4834 4701
/*
 * Handle an "address reply" descriptor: the IOC has placed a full
 * reply frame in host memory and the descriptor carries its address.
 * Dispatch on the originating function (SCSI IO, task management,
 * firmware download, event notification, diag buffer post) and then
 * return the reply frame to the free queue.
 */
static void
mptsas_handle_address_reply(mptsas_t *mpt,
    pMpi2ReplyDescriptorsUnion_t reply_desc)
{
	pMpi2AddressReplyDescriptor_t address_reply;
	pMPI2DefaultReply_t reply;
	mptsas_fw_diagnostic_buffer_t *pBuffer;
	uint32_t reply_addr;
	uint16_t SMID, iocstatus;
	mptsas_slots_t *slots = mpt->m_active;
	mptsas_cmd_t *cmd = NULL;
	uint8_t function, buffer_type;
	m_replyh_arg_t *args;
	int reply_frame_no;

	ASSERT(mutex_owned(&mpt->m_mutex));

	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
	/* Read descriptor fields through the DMA access handle. */
	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
	    &address_reply->ReplyFrameAddress);
	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);

	/*
	 * If reply frame is not in the proper range we should ignore this
	 * message and exit the interrupt handler.
	 */
	if ((reply_addr < mpt->m_reply_frame_dma_addr) ||
	    (reply_addr >= (mpt->m_reply_frame_dma_addr +
	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
	    ((reply_addr - mpt->m_reply_frame_dma_addr) %
	    mpt->m_reply_frame_size != 0)) {
		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
		    "address 0x%x\n", reply_addr);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		return;
	}

	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
	    DDI_DMA_SYNC_FORCPU);
	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
	    mpt->m_reply_frame_dma_addr));
	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);

	/*
	 * don't get slot information and command for events since these values
	 * don't exist
	 */
	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
		/*
		 * This could be a TM reply, which use the last allocated SMID,
		 * so allow for that.
		 */
		if ((SMID == 0) || (SMID > (slots->m_n_slots + 1))) {
			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
			    "%d\n", SMID);
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			return;
		}

		cmd = slots->m_slot[SMID];

		/*
		 * print warning and return if the slot is empty
		 */
		if (cmd == NULL) {
			mptsas_log(mpt, CE_WARN, "?NULL command for address "
			    "reply in slot %d", SMID);
			return;
		}
		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
		    (cmd->cmd_flags & CFLAG_CONFIG) ||
		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
			/*
			 * Synchronous waiters (passthrough, config page,
			 * fw diag) get the frame address handed back and
			 * are woken; they recycle the frame themselves.
			 */
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_passthru_cv);
			cv_broadcast(&mpt->m_config_cv);
			cv_broadcast(&mpt->m_fw_diag_cv);
			return;
		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
			mptsas_remove_cmd(mpt, cmd);
		}
		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
	}
	/*
	 * Depending on the function, we need to handle
	 * the reply frame (and cmd) differently.
	 */
	switch (function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		cmd->cmd_rfm = reply_addr;
		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
		    cmd);
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		cmd->cmd_flags |= CFLAG_FINISHED;
		cv_signal(&mpt->m_fw_cv);
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		reply_frame_no = (reply_addr - mpt->m_reply_frame_dma_addr) /
		    mpt->m_reply_frame_size;
		args = &mpt->m_replyh_args[reply_frame_no];
		args->mpt = (void *)mpt;
		args->rfm = reply_addr;

		/*
		 * Record the event if its type is enabled in
		 * this mpt instance by ioctl.
		 */
		mptsas_record_event(args);

		/*
		 * Handle time critical events
		 * NOT_RESPONDING/ADDED only now
		 */
		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
			/*
			 * Would not return main process,
			 * just let taskq resolve ack action
			 * and ack would be sent in taskq thread
			 */
			NDBG20(("send mptsas_handle_event_sync success"));
		}

		if (mpt->m_in_reset) {
			/*
			 * NOTE(review): this early return does not put the
			 * reply frame back on the free queue -- presumably
			 * the queues are reinitialized by the reset path;
			 * confirm no frame is leaked here.
			 */
			NDBG20(("dropping event received during reset"));
			return;
		}

		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "No memory available"
			    "for dispatch taskq");
			/*
			 * Return the reply frame to the free queue.
			 */
			ddi_put32(mpt->m_acc_free_queue_hdl,
			    &((uint32_t *)(void *)
			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
				mpt->m_free_index = 0;
			}

			ddi_put32(mpt->m_datap,
			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
		}
		return;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		/*
		 * If SMID is 0, this implies that the reply is due to a
		 * release function with a status that the buffer has been
		 * released. Set the buffer flags accordingly.
		 */
		if (SMID == 0) {
			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
			    &reply->IOCStatus);
			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
				pBuffer =
				    &mpt->m_fw_diag_buffer_list[buffer_type];
				pBuffer->valid_data = TRUE;
				pBuffer->owned_by_firmware = FALSE;
				pBuffer->immediate = FALSE;
			}
		} else {
			/*
			 * Normal handling of diag post reply with SMID.
			 */
			cmd = slots->m_slot[SMID];

			/*
			 * print warning and return if the slot is empty
			 */
			if (cmd == NULL) {
				mptsas_log(mpt, CE_WARN, "?NULL command for "
				    "address reply in slot %d", SMID);
				return;
			}
			cmd->cmd_rfm = reply_addr;
			cmd->cmd_flags |= CFLAG_FINISHED;
			cv_broadcast(&mpt->m_fw_diag_cv);
		}
		return;
	default:
		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
		break;
	}

	/*
	 * Return the reply frame to the free queue.
	 */
	ddi_put32(mpt->m_acc_free_queue_hdl,
	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
	    reply_addr);
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
		mpt->m_free_index = 0;
	}
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
	    mpt->m_free_index);

	/* Firmware-internal commands have no pkt to complete. */
	if (cmd->cmd_flags & CFLAG_FW_CMD)
		return;

	if (cmd->cmd_flags & CFLAG_RETRY) {
		/*
		 * The target returned QFULL or busy, do not add this
		 * pkt to the doneq since the hba will retry
		 * this cmd.
		 *
		 * The pkt has already been resubmitted in
		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
		 * Remove this cmd_flag here.
		 */
		cmd->cmd_flags &= ~CFLAG_RETRY;
	} else {
		mptsas_doneq_add(mpt, cmd);
	}
}
5055 4929
5056 4930 static void
5057 4931 mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
5058 4932 mptsas_cmd_t *cmd)
5059 4933 {
5060 4934 uint8_t scsi_status, scsi_state;
5061 4935 uint16_t ioc_status;
5062 4936 uint32_t xferred, sensecount, responsedata, loginfo = 0;
5063 4937 struct scsi_pkt *pkt;
5064 4938 struct scsi_arq_status *arqstat;
5065 4939 struct buf *bp;
5066 4940 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5067 4941 uint8_t *sensedata = NULL;
5068 4942
5069 4943 if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
5070 4944 (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
5071 4945 bp = cmd->cmd_ext_arq_buf;
5072 4946 } else {
5073 4947 bp = cmd->cmd_arq_buf;
5074 4948 }
5075 4949
5076 4950 scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
5077 4951 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5078 4952 scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
5079 4953 xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
5080 4954 sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
5081 4955 responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
5082 4956 &reply->ResponseInfo);
5083 4957
5084 4958 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
5085 4959 loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
5086 4960 &reply->IOCLogInfo);
5087 4961 mptsas_log(mpt, CE_NOTE,
5088 4962 "?Log info 0x%x received for target %d.\n"
5089 4963 "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5090 4964 loginfo, Tgt(cmd), scsi_status, ioc_status,
5091 4965 scsi_state);
5092 4966 }
5093 4967
5094 4968 NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5095 4969 scsi_status, ioc_status, scsi_state));
5096 4970
5097 4971 pkt = CMD2PKT(cmd);
5098 4972 *(pkt->pkt_scbp) = scsi_status;
5099 4973
5100 4974 if (loginfo == 0x31170000) {
5101 4975 /*
5102 4976 * if loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5103 4977 * 0x31170000 comes, that means the device missing delay
5104 4978 * is in progressing, the command need retry later.
↓ open down ↓ |
134 lines elided |
↑ open up ↑ |
5105 4979 */
5106 4980 *(pkt->pkt_scbp) = STATUS_BUSY;
5107 4981 return;
5108 4982 }
5109 4983
5110 4984 if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
5111 4985 ((ioc_status & MPI2_IOCSTATUS_MASK) ==
5112 4986 MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
5113 4987 pkt->pkt_reason = CMD_INCOMPLETE;
5114 4988 pkt->pkt_state |= STATE_GOT_BUS;
5115 - mutex_enter(&ptgt->m_tgt_intr_mutex);
5116 4989 if (ptgt->m_reset_delay == 0) {
5117 4990 mptsas_set_throttle(mpt, ptgt,
5118 4991 DRAIN_THROTTLE);
5119 4992 }
5120 - mutex_exit(&ptgt->m_tgt_intr_mutex);
5121 4993 return;
5122 4994 }
5123 4995
5124 4996 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5125 4997 responsedata &= 0x000000FF;
5126 4998 if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5127 4999 mptsas_log(mpt, CE_NOTE, "Do not support the TLR\n");
5128 5000 pkt->pkt_reason = CMD_TLR_OFF;
5129 5001 return;
5130 5002 }
5131 5003 }
5132 5004
5133 5005
5134 5006 switch (scsi_status) {
5135 5007 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5136 5008 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5137 5009 arqstat = (void*)(pkt->pkt_scbp);
5138 5010 arqstat->sts_rqpkt_status = *((struct scsi_status *)
5139 5011 (pkt->pkt_scbp));
5140 5012 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
5141 5013 STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
5142 5014 if (cmd->cmd_flags & CFLAG_XARQ) {
5143 5015 pkt->pkt_state |= STATE_XARQ_DONE;
5144 5016 }
5145 5017 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5146 5018 pkt->pkt_state |= STATE_XFERRED_DATA;
5147 5019 }
5148 5020 arqstat->sts_rqpkt_reason = pkt->pkt_reason;
5149 5021 arqstat->sts_rqpkt_state = pkt->pkt_state;
5150 5022 arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
5151 5023 arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
5152 5024 sensedata = (uint8_t *)&arqstat->sts_sensedata;
5153 5025
5154 5026 bcopy((uchar_t *)bp->b_un.b_addr, sensedata,
5155 5027 ((cmd->cmd_rqslen >= sensecount) ? sensecount :
5156 5028 cmd->cmd_rqslen));
5157 5029 arqstat->sts_rqpkt_resid = (cmd->cmd_rqslen - sensecount);
5158 5030 cmd->cmd_flags |= CFLAG_CMDARQ;
5159 5031 /*
5160 5032 * Set proper status for pkt if autosense was valid
5161 5033 */
5162 5034 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5163 5035 struct scsi_status zero_status = { 0 };
5164 5036 arqstat->sts_rqpkt_status = zero_status;
5165 5037 }
5166 5038
5167 5039 /*
5168 5040 * ASC=0x47 is parity error
5169 5041 * ASC=0x48 is initiator detected error received
5170 5042 */
5171 5043 if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
5172 5044 ((scsi_sense_asc(sensedata) == 0x47) ||
5173 5045 (scsi_sense_asc(sensedata) == 0x48))) {
5174 5046 mptsas_log(mpt, CE_NOTE, "Aborted_command!");
5175 5047 }
5176 5048
5177 5049 /*
5178 5050 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5179 5051 * ASC/ASCQ=0x25/0x00 means invalid lun
5180 5052 */
5181 5053 if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
5182 5054 (scsi_sense_asc(sensedata) == 0x3F) &&
5183 5055 (scsi_sense_ascq(sensedata) == 0x0E)) ||
5184 5056 ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
5185 5057 (scsi_sense_asc(sensedata) == 0x25) &&
5186 5058 (scsi_sense_ascq(sensedata) == 0x00))) {
5187 5059 mptsas_topo_change_list_t *topo_node = NULL;
5188 5060
5189 5061 topo_node = kmem_zalloc(
5190 5062 sizeof (mptsas_topo_change_list_t),
5191 5063 KM_NOSLEEP);
5192 5064 if (topo_node == NULL) {
5193 5065 mptsas_log(mpt, CE_NOTE, "No memory"
5194 5066 "resource for handle SAS dynamic"
5195 5067 "reconfigure.\n");
5196 5068 break;
5197 5069 }
5198 5070 topo_node->mpt = mpt;
5199 5071 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
5200 5072 topo_node->un.phymask = ptgt->m_phymask;
5201 5073 topo_node->devhdl = ptgt->m_devhdl;
5202 5074 topo_node->object = (void *)ptgt;
5203 5075 topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5204 5076
5205 5077 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5206 5078 mptsas_handle_dr,
5207 5079 (void *)topo_node,
5208 5080 DDI_NOSLEEP)) != DDI_SUCCESS) {
5209 5081 mptsas_log(mpt, CE_NOTE, "mptsas start taskq"
↓ open down ↓ |
79 lines elided |
↑ open up ↑ |
5210 5082 "for handle SAS dynamic reconfigure"
5211 5083 "failed. \n");
5212 5084 }
5213 5085 }
5214 5086 break;
5215 5087 case MPI2_SCSI_STATUS_GOOD:
5216 5088 switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5217 5089 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5218 5090 pkt->pkt_reason = CMD_DEV_GONE;
5219 5091 pkt->pkt_state |= STATE_GOT_BUS;
5220 - mutex_enter(&ptgt->m_tgt_intr_mutex);
5221 5092 if (ptgt->m_reset_delay == 0) {
5222 5093 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5223 5094 }
5224 - mutex_exit(&ptgt->m_tgt_intr_mutex);
5225 5095 NDBG31(("lost disk for target%d, command:%x",
5226 5096 Tgt(cmd), pkt->pkt_cdbp[0]));
5227 5097 break;
5228 5098 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5229 5099 NDBG31(("data overrun: xferred=%d", xferred));
5230 5100 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5231 5101 pkt->pkt_reason = CMD_DATA_OVR;
5232 5102 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5233 5103 | STATE_SENT_CMD | STATE_GOT_STATUS
5234 5104 | STATE_XFERRED_DATA);
5235 5105 pkt->pkt_resid = 0;
5236 5106 break;
5237 5107 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5238 5108 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5239 5109 NDBG31(("data underrun: xferred=%d", xferred));
5240 5110 NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5241 5111 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5242 5112 | STATE_SENT_CMD | STATE_GOT_STATUS);
5243 5113 pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5244 5114 if (pkt->pkt_resid != cmd->cmd_dmacount) {
5245 5115 pkt->pkt_state |= STATE_XFERRED_DATA;
5246 5116 }
5247 5117 break;
5248 5118 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5249 5119 mptsas_set_pkt_reason(mpt,
5250 5120 cmd, CMD_RESET, STAT_BUS_RESET);
5251 5121 break;
5252 5122 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5253 5123 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5254 5124 mptsas_set_pkt_reason(mpt,
5255 5125 cmd, CMD_RESET, STAT_DEV_RESET);
5256 5126 break;
5257 5127 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5258 5128 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5259 5129 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5260 5130 mptsas_set_pkt_reason(mpt,
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
5261 5131 cmd, CMD_TERMINATED, STAT_TERMINATED);
5262 5132 break;
5263 5133 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5264 5134 case MPI2_IOCSTATUS_BUSY:
5265 5135 /*
5266 5136 * set throttles to drain
5267 5137 */
5268 5138 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5269 5139 &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
5270 5140 while (ptgt != NULL) {
5271 - mutex_enter(&ptgt->m_tgt_intr_mutex);
5272 5141 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5273 - mutex_exit(&ptgt->m_tgt_intr_mutex);
5274 5142
5275 5143 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
5276 5144 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
5277 5145 }
5278 5146
5279 5147 /*
5280 5148 * retry command
5281 5149 */
5282 5150 cmd->cmd_flags |= CFLAG_RETRY;
5283 5151 cmd->cmd_pkt_flags |= FLAG_HEAD;
5284 5152
5285 - mutex_exit(&mpt->m_mutex);
5286 5153 (void) mptsas_accept_pkt(mpt, cmd);
5287 - mutex_enter(&mpt->m_mutex);
5288 5154 break;
5289 5155 default:
5290 5156 mptsas_log(mpt, CE_WARN,
5291 5157 "unknown ioc_status = %x\n", ioc_status);
5292 5158 mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5293 5159 "count = %x, scsi_status = %x", scsi_state,
5294 5160 xferred, scsi_status);
5295 5161 break;
5296 5162 }
5297 5163 break;
5298 5164 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5299 5165 mptsas_handle_qfull(mpt, cmd);
5300 5166 break;
5301 5167 case MPI2_SCSI_STATUS_BUSY:
5302 5168 NDBG31(("scsi_status busy received"));
5303 5169 break;
5304 5170 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5305 5171 NDBG31(("scsi_status reservation conflict received"));
5306 5172 break;
5307 5173 default:
5308 5174 mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
5309 5175 scsi_status, ioc_status);
5310 5176 mptsas_log(mpt, CE_WARN,
5311 5177 "mptsas_process_intr: invalid scsi status\n");
5312 5178 break;
5313 5179 }
5314 5180 }
5315 5181
5316 5182 static void
5317 5183 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5318 5184 mptsas_cmd_t *cmd)
5319 5185 {
5320 5186 uint8_t task_type;
5321 5187 uint16_t ioc_status;
5322 5188 uint32_t log_info;
5323 5189 uint16_t dev_handle;
5324 5190 struct scsi_pkt *pkt = CMD2PKT(cmd);
5325 5191
5326 5192 task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5327 5193 ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5328 5194 log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5329 5195 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5330 5196
5331 5197 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5332 5198 mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5333 5199 "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5334 5200 task_type, ioc_status, log_info, dev_handle);
5335 5201 pkt->pkt_reason = CMD_INCOMPLETE;
5336 5202 return;
5337 5203 }
5338 5204
5339 5205 switch (task_type) {
5340 5206 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5341 5207 case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5342 5208 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5343 5209 case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5344 5210 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5345 5211 case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5346 5212 break;
5347 5213 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5348 5214 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5349 5215 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5350 5216 /*
5351 5217 * Check for invalid DevHandle of 0 in case application
5352 5218 * sends bad command. DevHandle of 0 could cause problems.
5353 5219 */
5354 5220 if (dev_handle == 0) {
5355 5221 mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5356 5222 " DevHandle of 0.");
5357 5223 } else {
5358 5224 mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5359 5225 task_type);
5360 5226 }
5361 5227 break;
5362 5228 default:
5363 5229 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5364 5230 task_type);
5365 5231 mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5366 5232 break;
5367 5233 }
5368 5234 }
5369 5235
5370 5236 static void
5371 5237 mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
5372 5238 {
5373 5239 mptsas_t *mpt = arg->mpt;
5374 5240 uint64_t t = arg->t;
5375 5241 mptsas_cmd_t *cmd;
5376 5242 struct scsi_pkt *pkt;
5377 5243 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
5378 5244
5379 5245 mutex_enter(&item->mutex);
5380 5246 while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
5381 5247 if (!item->doneq) {
5382 5248 cv_wait(&item->cv, &item->mutex);
5383 5249 }
5384 5250 pkt = NULL;
5385 5251 if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
5386 5252 cmd->cmd_flags |= CFLAG_COMPLETED;
5387 5253 pkt = CMD2PKT(cmd);
5388 5254 }
5389 5255 mutex_exit(&item->mutex);
5390 5256 if (pkt) {
5391 5257 mptsas_pkt_comp(pkt, cmd);
↓ open down ↓ |
94 lines elided |
↑ open up ↑ |
5392 5258 }
5393 5259 mutex_enter(&item->mutex);
5394 5260 }
5395 5261 mutex_exit(&item->mutex);
5396 5262 mutex_enter(&mpt->m_doneq_mutex);
5397 5263 mpt->m_doneq_thread_n--;
5398 5264 cv_broadcast(&mpt->m_doneq_thread_cv);
5399 5265 mutex_exit(&mpt->m_doneq_mutex);
5400 5266 }
5401 5267
5268 +
5402 5269 /*
5403 5270 * mpt interrupt handler.
5404 5271 */
5405 5272 static uint_t
5406 5273 mptsas_intr(caddr_t arg1, caddr_t arg2)
5407 5274 {
5408 5275 mptsas_t *mpt = (void *)arg1;
5409 5276 pMpi2ReplyDescriptorsUnion_t reply_desc_union;
5410 5277 uchar_t did_reply = FALSE;
5411 - int i = 0, j;
5412 - uint8_t reply_type;
5413 - uint16_t SMID;
5414 5278
5415 5279 NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5416 5280
5417 - /*
5418 - * 1.
5419 - * To avoid using m_mutex in the ISR(ISR referes not only mptsas_intr,
5420 - * but all of the recursive called functions in it. the same below),
5421 - * separate mutexs are introduced to protect the elements shown in ISR.
5422 - * 3 type of mutex are involved here:
5423 - * a)per instance mutex m_intr_mutex.
5424 - * b)per target mutex m_tgt_intr_mutex.
5425 - * c)mutex that protect the free slot.
5426 - *
5427 - * a)per instance mutex m_intr_mutex:
5428 - * used to protect m_options, m_power, m_waitq, etc that would be
5429 - * checked/modified in ISR; protect the getting and setting the reply
5430 - * descriptor index; protect the m_slots[];
5431 - *
5432 - * b)per target mutex m_tgt_intr_mutex:
5433 - * used to protect per target element which has relationship to ISR.
5434 - * contention for the new per target mutex is just as high as it in
5435 - * sd(7d) driver.
5436 - *
5437 - * c)mutexs that protect the free slots:
5438 - * those mutexs are introduced to minimize the mutex contentions
5439 - * between the IO request threads where free slots are allocated
5440 - * for sending cmds and ISR where slots holding outstanding cmds
5441 - * are returned to the free pool.
5442 - * the idea is like this:
5443 - * 1) Partition all of the free slot into NCPU groups. For example,
5444 - * In system where we have 15 slots, and 4 CPU, then slot s1,s5,s9,s13
5445 - * are marked belonging to CPU1, s2,s6,s10,s14 to CPU2, s3,s7,s11,s15
5446 - * to CPU3, and s4,s8,s12 to CPU4.
5447 - * 2) In each of the group, an alloc/release queue pair is created,
5448 - * and both the allocq and the releaseq have a dedicated mutex.
5449 - * 3) When init, all of the slots in a CPU group are inserted into the
5450 - * allocq of its CPU's pair.
5451 - * 4) When doing IO,
5452 - * mptsas_scsi_start()
5453 - * {
5454 - * cpuid = the cpu NO of the cpu where this thread is running on
5455 - * retry:
5456 - * mutex_enter(&allocq[cpuid]);
5457 - * if (get free slot = success) {
5458 - * remove the slot from the allocq
5459 - * mutex_exit(&allocq[cpuid]);
5460 - * return(success);
5461 - * } else { // exchange allocq and releaseq and try again
5462 - * mutex_enter(&releq[cpuid]);
5463 - * exchange the allocq and releaseq of this pair;
5464 - * mutex_exit(&releq[cpuid]);
5465 - * if (try to get free slot again = success) {
5466 - * remove the slot from the allocq
5467 - * mutex_exit(&allocq[cpuid]);
5468 - * return(success);
5469 - * } else {
5470 - * MOD(cpuid)++;
5471 - * goto retry;
5472 - * if (all CPU groups tried)
5473 - * mutex_exit(&allocq[cpuid]);
5474 - * return(failure);
5475 - * }
5476 - * }
5477 - * }
5478 - * ISR()
5479 - * {
5480 - * cpuid = the CPU group id where the slot sending the
5481 - * cmd belongs;
5482 - * mutex_enter(&releq[cpuid]);
5483 - * remove the slot from the releaseq
5484 - * mutex_exit(&releq[cpuid]);
5485 - * }
5486 - * This way, only when the queue pair doing exchange have mutex
5487 - * contentions.
5488 - *
5489 - * For mutex m_intr_mutex and m_tgt_intr_mutex, there are 2 scenarios:
5490 - *
5491 - * a)If the elements are only checked but not modified in the ISR, then
5492 - * only the places where those elements are modifed(outside of ISR)
5493 - * need to be protected by the new introduced mutex.
5494 - * For example, data A is only read/checked in ISR, then we need do
5495 - * like this:
5496 - * In ISR:
5497 - * {
5498 - * mutex_enter(&new_mutex);
5499 - * read(A);
5500 - * mutex_exit(&new_mutex);
5501 - * //the new_mutex here is either the m_tgt_intr_mutex or
5502 - * //the m_intr_mutex.
5503 - * }
5504 - * In non-ISR
5505 - * {
5506 - * mutex_enter(&m_mutex); //the stock driver already did this
5507 - * mutex_enter(&new_mutex);
5508 - * write(A);
5509 - * mutex_exit(&new_mutex);
5510 - * mutex_exit(&m_mutex); //the stock driver already did this
5511 - *
5512 - * read(A);
5513 - * // read(A) in non-ISR is not required to be protected by new
5514 - * // mutex since 'A' has already been protected by m_mutex
5515 - * // outside of the ISR
5516 - * }
5517 - *
5518 - * Those fields in mptsas_target_t/ptgt which are only read in ISR
5519 - * fall into this catergory. So they, together with the fields which
5520 - * are never read in ISR, are not necessary to be protected by
5521 - * m_tgt_intr_mutex, don't bother.
5522 - * checking of m_waitq also falls into this catergory. so all of the
5523 - * place outside of ISR where the m_waitq is modified, such as in
5524 - * mptsas_waitq_add(), mptsas_waitq_delete(), mptsas_waitq_rm(),
5525 - * m_intr_mutex should be used.
5526 - *
5527 - * b)If the elements are modified in the ISR, then each place where
5528 - * those elements are referred(outside of ISR) need to be protected
5529 - * by the new introduced mutex. Of course, if those elements only
5530 - * appear in the non-key code path, that is, they don't affect
5531 - * performance, then the m_mutex can still be used as before.
5532 - * For example, data B is modified in key code path in ISR, and data C
5533 - * is modified in non-key code path in ISR, then we can do like this:
5534 - * In ISR:
5535 - * {
5536 - * mutex_enter(&new_mutex);
5537 - * wirte(B);
5538 - * mutex_exit(&new_mutex);
5539 - * if (seldom happen) {
5540 - * mutex_enter(&m_mutex);
5541 - * write(C);
5542 - * mutex_exit(&m_mutex);
5543 - * }
5544 - * //the new_mutex here is either the m_tgt_intr_mutex or
5545 - * //the m_intr_mutex.
5546 - * }
5547 - * In non-ISR
5548 - * {
5549 - * mutex_enter(&new_mutex);
5550 - * write(B);
5551 - * mutex_exit(&new_mutex);
5552 - *
5553 - * mutex_enter(&new_mutex);
5554 - * read(B);
5555 - * mutex_exit(&new_mutex);
5556 - * // both write(B) and read(B) in non-ISR is required to be
5557 - * // protected by new mutex outside of the ISR
5558 - *
5559 - * mutex_enter(&m_mutex); //the stock driver already did this
5560 - * read(C);
5561 - * write(C);
5562 - * mutex_exit(&m_mutex); //the stock driver already did this
5563 - * // both write(C) and read(C) in non-ISR have been already
5564 - * // been protected by m_mutex outside of the ISR
5565 - * }
5566 - *
5567 - * For example, ptgt->m_t_ncmds fall into 'B' of this catergory, and
5568 - * elements shown in address reply, restart_hba, passthrough, IOC
5569 - * fall into 'C' of this catergory.
5570 - *
5571 - * In any case where mutexs are nested, make sure in the following
5572 - * order:
5573 - * m_mutex -> m_intr_mutex -> m_tgt_intr_mutex
5574 - * m_intr_mutex -> m_tgt_intr_mutex
5575 - * m_mutex -> m_intr_mutex
5576 - * m_mutex -> m_tgt_intr_mutex
5577 - *
5578 - * 2.
5579 - * Make sure at any time, getting the ReplyDescriptor by m_post_index
5580 - * and setting m_post_index to the ReplyDescriptorIndex register are
5581 - * atomic. Since m_mutex is not used for this purpose in ISR, the new
5582 - * mutex m_intr_mutex must play this role. So mptsas_poll(), where this
5583 - * kind of getting/setting is also performed, must use m_intr_mutex.
5584 - * Note, since context reply in ISR/process_intr is the only code path
5585 - * which affect performance, a fast path is introduced to only handle
5586 - * the read/write IO having context reply. For other IOs such as
5587 - * passthrough and IOC with context reply and all address reply, we
5588 - * use the as-is process_intr() to handle them. In order to keep the
5589 - * same semantics in process_intr(), make sure any new mutex is not held
5590 - * before enterring it.
5591 - */
5281 + mutex_enter(&mpt->m_mutex);
5592 5282
5593 - mutex_enter(&mpt->m_intr_mutex);
5594 -
5595 5283 /*
5596 5284 * If interrupts are shared by two channels then check whether this
5597 5285 * interrupt is genuinely for this channel by making sure first the
5598 5286 * chip is in high power state.
5599 5287 */
5600 5288 if ((mpt->m_options & MPTSAS_OPT_PM) &&
5601 5289 (mpt->m_power_level != PM_LEVEL_D0)) {
5602 - mutex_exit(&mpt->m_intr_mutex);
5290 + mutex_exit(&mpt->m_mutex);
5603 5291 return (DDI_INTR_UNCLAIMED);
5604 5292 }
5605 5293
5606 5294 /*
5607 5295 * If polling, interrupt was triggered by some shared interrupt because
5608 5296 * IOC interrupts are disabled during polling, so polling routine will
5609 5297 * handle any replies. Considering this, if polling is happening,
5610 5298 * return with interrupt unclaimed.
5611 5299 */
5612 5300 if (mpt->m_polled_intr) {
5613 - mutex_exit(&mpt->m_intr_mutex);
5301 + mutex_exit(&mpt->m_mutex);
5614 5302 mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
5615 5303 return (DDI_INTR_UNCLAIMED);
5616 5304 }
5617 5305
5618 5306 /*
5619 5307 * Read the istat register.
5620 5308 */
5621 5309 if ((INTPENDING(mpt)) != 0) {
5622 5310 /*
5623 5311 * read fifo until empty.
5624 5312 */
5625 5313 #ifndef __lock_lint
5626 5314 _NOTE(CONSTCOND)
5627 5315 #endif
5628 5316 while (TRUE) {
5629 5317 (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5630 5318 DDI_DMA_SYNC_FORCPU);
5631 5319 reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5632 5320 MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5633 5321
5634 5322 if (ddi_get32(mpt->m_acc_post_queue_hdl,
5635 5323 &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5636 5324 ddi_get32(mpt->m_acc_post_queue_hdl,
5637 5325 &reply_desc_union->Words.High) == 0xFFFFFFFF) {
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
5638 5326 break;
5639 5327 }
5640 5328
5641 5329 /*
5642 5330 * The reply is valid, process it according to its
5643 5331 * type. Also, set a flag for updating the reply index
5644 5332 * after they've all been processed.
5645 5333 */
5646 5334 did_reply = TRUE;
5647 5335
5648 - reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5649 - &reply_desc_union->Default.ReplyFlags);
5650 - reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5651 - mpt->m_reply[i].Default.ReplyFlags = reply_type;
5652 - if (reply_type ==
5653 - MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5654 - SMID = ddi_get16(mpt->m_acc_post_queue_hdl,
5655 - &reply_desc_union->SCSIIOSuccess.SMID);
5656 - if (mptsas_handle_io_fastpath(mpt, SMID) !=
5657 - TRUE) {
5658 - mpt->m_reply[i].SCSIIOSuccess.SMID =
5659 - SMID;
5660 - i++;
5661 - }
5662 - } else if (reply_type ==
5663 - MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5664 - mpt->m_reply[i].AddressReply.ReplyFrameAddress =
5665 - ddi_get32(mpt->m_acc_post_queue_hdl,
5666 - &reply_desc_union->AddressReply.
5667 - ReplyFrameAddress);
5668 - mpt->m_reply[i].AddressReply.SMID =
5669 - ddi_get16(mpt->m_acc_post_queue_hdl,
5670 - &reply_desc_union->AddressReply.SMID);
5671 - i++;
5672 - }
5673 - /*
5674 - * Clear the reply descriptor for re-use and increment
5675 - * index.
5676 - */
5677 - ddi_put64(mpt->m_acc_post_queue_hdl,
5678 - &((uint64_t *)(void *)mpt->m_post_queue)
5679 - [mpt->m_post_index], 0xFFFFFFFFFFFFFFFF);
5680 - (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5681 - DDI_DMA_SYNC_FORDEV);
5336 + mptsas_process_intr(mpt, reply_desc_union);
5682 5337
5683 5338 /*
5684 5339 * Increment post index and roll over if needed.
5685 5340 */
5686 5341 if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5687 5342 mpt->m_post_index = 0;
5688 5343 }
5689 - if (i >= MPI_ADDRESS_COALSCE_MAX)
5690 - break;
5691 5344 }
5692 5345
5693 5346 /*
5694 5347 * Update the global reply index if at least one reply was
5695 5348 * processed.
5696 5349 */
5697 5350 if (did_reply) {
5698 5351 ddi_put32(mpt->m_datap,
5699 5352 &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5700 -
5701 - /*
5702 - * For fma, only check the PIO is required and enough
5703 - * here. Those cases where fastpath is not hit, the
5704 - * mptsas_fma_check() check all of the types of
5705 - * fma. That is not necessary and sometimes not
5706 - * correct. fma check should only be done after
5707 - * the PIO and/or dma is performed.
5708 - */
5709 - if ((mptsas_check_acc_handle(mpt->m_datap) !=
5710 - DDI_SUCCESS)) {
5711 - ddi_fm_service_impact(mpt->m_dip,
5712 - DDI_SERVICE_UNAFFECTED);
5713 - }
5714 -
5715 5353 }
5716 5354 } else {
5717 - mutex_exit(&mpt->m_intr_mutex);
5355 + mutex_exit(&mpt->m_mutex);
5718 5356 return (DDI_INTR_UNCLAIMED);
5719 5357 }
5720 5358 NDBG1(("mptsas_intr complete"));
5721 - mutex_exit(&mpt->m_intr_mutex);
5722 5359
5723 5360 /*
5724 - * Since most of the cmds(read and write IO with success return.)
5725 - * have already been processed in fast path in which the m_mutex
5726 - * is not held, handling here the address reply and other context reply
5727 - * such as passthrough and IOC cmd with m_mutex held should be a big
5728 - * issue for performance.
5729 - * If holding m_mutex to process these cmds was still an obvious issue,
5730 - * we can process them in a taskq.
5731 - */
5732 - for (j = 0; j < i; j++) {
5733 - mutex_enter(&mpt->m_mutex);
5734 - mptsas_process_intr(mpt, &mpt->m_reply[j]);
5735 - mutex_exit(&mpt->m_mutex);
5736 - }
5737 -
5738 - /*
5739 5361 * If no helper threads are created, process the doneq in ISR. If
5740 5362 * helpers are created, use the doneq length as a metric to measure the
5741 5363 * load on the interrupt CPU. If it is long enough, which indicates the
5742 5364 * load is heavy, then we deliver the IO completions to the helpers.
5743 5365 * This measurement has some limitations, although it is simple and
5744 5366 * straightforward and works well for most of the cases at present.
5745 5367 */
5746 - if (!mpt->m_doneq_thread_n) {
5368 + if (!mpt->m_doneq_thread_n ||
5369 + (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
5747 5370 mptsas_doneq_empty(mpt);
5748 5371 } else {
5749 - int helper = 1;
5750 - mutex_enter(&mpt->m_intr_mutex);
5751 - if (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)
5752 - helper = 0;
5753 - mutex_exit(&mpt->m_intr_mutex);
5754 - if (helper) {
5755 - mptsas_deliver_doneq_thread(mpt);
5756 - } else {
5757 - mptsas_doneq_empty(mpt);
5758 - }
5372 + mptsas_deliver_doneq_thread(mpt);
5759 5373 }
5760 5374
5761 5375 /*
5762 5376 * If there are queued cmd, start them now.
5763 5377 */
5764 - mutex_enter(&mpt->m_intr_mutex);
5765 5378 if (mpt->m_waitq != NULL) {
5766 - mutex_exit(&mpt->m_intr_mutex);
5767 - mutex_enter(&mpt->m_mutex);
5768 - mptsas_restart_hba(mpt);
5769 - mutex_exit(&mpt->m_mutex);
5770 - return (DDI_INTR_CLAIMED);
5379 + mptsas_restart_waitq(mpt);
5771 5380 }
5772 - mutex_exit(&mpt->m_intr_mutex);
5381 +
5382 + mutex_exit(&mpt->m_mutex);
5773 5383 return (DDI_INTR_CLAIMED);
5774 5384 }
5775 5385
5776 -/*
5777 - * In ISR, the successfully completed read and write IO are processed in a
5778 - * fast path. This function is only used to handle non-fastpath IO, including
5779 - * all of the address reply, and the context reply for IOC cmd, passthrough,
5780 - * etc.
5781 - * This function is also used to process polled cmd.
5782 - */
5783 5386 static void
5784 5387 mptsas_process_intr(mptsas_t *mpt,
5785 5388 pMpi2ReplyDescriptorsUnion_t reply_desc_union)
5786 5389 {
5787 5390 uint8_t reply_type;
5788 5391
5392 + ASSERT(mutex_owned(&mpt->m_mutex));
5393 +
5789 5394 /*
5790 5395 * The reply is valid, process it according to its
5791 5396 * type. Also, set a flag for updated the reply index
5792 5397 * after they've all been processed.
5793 5398 */
5794 - reply_type = reply_desc_union->Default.ReplyFlags;
5399 + reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
5400 + &reply_desc_union->Default.ReplyFlags);
5401 + reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
5795 5402 if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
5796 5403 mptsas_handle_scsi_io_success(mpt, reply_desc_union);
5797 5404 } else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
5798 5405 mptsas_handle_address_reply(mpt, reply_desc_union);
5799 5406 } else {
5800 5407 mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
5801 5408 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5802 5409 }
5410 +
5411 + /*
5412 + * Clear the reply descriptor for re-use and increment
5413 + * index.
5414 + */
5415 + ddi_put64(mpt->m_acc_post_queue_hdl,
5416 + &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
5417 + 0xFFFFFFFFFFFFFFFF);
5418 + (void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5419 + DDI_DMA_SYNC_FORDEV);
5803 5420 }
5804 5421
5805 5422 /*
5806 5423 * handle qfull condition
5807 5424 */
5808 5425 static void
5809 5426 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
5810 5427 {
5811 5428 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
5812 5429
5813 5430 if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
5814 5431 (ptgt->m_qfull_retries == 0)) {
5815 5432 /*
5816 5433 * We have exhausted the retries on QFULL, or,
5817 5434 * the target driver has indicated that it
5818 5435 * wants to handle QFULL itself by setting
5819 5436 * qfull-retries capability to 0. In either case
5820 5437 * we want the target driver's QFULL handling
5821 5438 * to kick in. We do this by having pkt_reason
5822 5439 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
5823 5440 */
5824 - mutex_enter(&ptgt->m_tgt_intr_mutex);
5825 5441 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5826 - mutex_exit(&ptgt->m_tgt_intr_mutex);
5827 5442 } else {
5828 - mutex_enter(&ptgt->m_tgt_intr_mutex);
5829 5443 if (ptgt->m_reset_delay == 0) {
5830 5444 ptgt->m_t_throttle =
5831 5445 max((ptgt->m_t_ncmds - 2), 0);
5832 5446 }
5833 - mutex_exit(&ptgt->m_tgt_intr_mutex);
5834 5447
5835 5448 cmd->cmd_pkt_flags |= FLAG_HEAD;
5836 5449 cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
5837 5450 cmd->cmd_flags |= CFLAG_RETRY;
5838 5451
5839 - mutex_exit(&mpt->m_mutex);
5840 5452 (void) mptsas_accept_pkt(mpt, cmd);
5841 - mutex_enter(&mpt->m_mutex);
5842 5453
5843 5454 /*
5844 5455 * when target gives queue full status with no commands
5845 5456 * outstanding (m_t_ncmds == 0), throttle is set to 0
5846 5457 * (HOLD_THROTTLE), and the queue full handling start
5847 5458 * (see psarc/1994/313); if there are commands outstanding,
5848 5459 * throttle is set to (m_t_ncmds - 2)
5849 5460 */
5850 - mutex_enter(&ptgt->m_tgt_intr_mutex);
5851 5461 if (ptgt->m_t_throttle == HOLD_THROTTLE) {
5852 5462 /*
5853 5463 * By setting throttle to QFULL_THROTTLE, we
5854 5464 * avoid submitting new commands and in
5855 5465 * mptsas_restart_cmd find out slots which need
5856 5466 * their throttles to be cleared.
5857 5467 */
5858 5468 mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
5859 5469 if (mpt->m_restart_cmd_timeid == 0) {
5860 5470 mpt->m_restart_cmd_timeid =
5861 5471 timeout(mptsas_restart_cmd, mpt,
5862 5472 ptgt->m_qfull_retry_interval);
5863 5473 }
5864 5474 }
5865 - mutex_exit(&ptgt->m_tgt_intr_mutex);
5866 5475 }
5867 5476 }
5868 5477
5869 5478 mptsas_phymask_t
5870 5479 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
5871 5480 {
5872 5481 mptsas_phymask_t phy_mask = 0;
5873 5482 uint8_t i = 0;
5874 5483
5875 5484 NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
5876 5485
5877 5486 ASSERT(mutex_owned(&mpt->m_mutex));
5878 5487
5879 5488 /*
5880 5489 * If physport is 0xFF, this is a RAID volume. Use phymask of 0.
5881 5490 */
5882 5491 if (physport == 0xFF) {
5883 5492 return (0);
5884 5493 }
5885 5494
5886 5495 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
5887 5496 if (mpt->m_phy_info[i].attached_devhdl &&
5888 5497 (mpt->m_phy_info[i].phy_mask != 0) &&
5889 5498 (mpt->m_phy_info[i].port_num == physport)) {
5890 5499 phy_mask = mpt->m_phy_info[i].phy_mask;
5891 5500 break;
5892 5501 }
5893 5502 }
5894 5503 NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
5895 5504 mpt->m_instance, physport, phy_mask));
5896 5505 return (phy_mask);
5897 5506 }
5898 5507
5899 5508 /*
5900 5509 * mpt free device handle after device gone, by use of passthrough
5901 5510 */
5902 5511 static int
5903 5512 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
5904 5513 {
5905 5514 Mpi2SasIoUnitControlRequest_t req;
5906 5515 Mpi2SasIoUnitControlReply_t rep;
5907 5516 int ret;
5908 5517
5909 5518 ASSERT(mutex_owned(&mpt->m_mutex));
5910 5519
5911 5520 /*
5912 5521 * Need to compose a SAS IO Unit Control request message
5913 5522 * and call mptsas_do_passthru() function
5914 5523 */
5915 5524 bzero(&req, sizeof (req));
5916 5525 bzero(&rep, sizeof (rep));
5917 5526
5918 5527 req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
5919 5528 req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
5920 5529 req.DevHandle = LE_16(devhdl);
5921 5530
5922 5531 ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
5923 5532 sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
5924 5533 if (ret != 0) {
5925 5534 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5926 5535 "Control error %d", ret);
5927 5536 return (DDI_FAILURE);
5928 5537 }
5929 5538
5930 5539 /* do passthrough success, check the ioc status */
5931 5540 if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
5932 5541 cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
5933 5542 "Control IOCStatus %d", LE_16(rep.IOCStatus));
5934 5543 return (DDI_FAILURE);
5935 5544 }
5936 5545
5937 5546 return (DDI_SUCCESS);
5938 5547 }
5939 5548
5940 5549 static void
5941 5550 mptsas_update_phymask(mptsas_t *mpt)
5942 5551 {
5943 5552 mptsas_phymask_t mask = 0, phy_mask;
5944 5553 char *phy_mask_name;
5945 5554 uint8_t current_port;
5946 5555 int i, j;
5947 5556
5948 5557 NDBG20(("mptsas%d update phymask ", mpt->m_instance));
5949 5558
5950 5559 ASSERT(mutex_owned(&mpt->m_mutex));
5951 5560
5952 5561 (void) mptsas_get_sas_io_unit_page(mpt);
5953 5562
5954 5563 phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
5955 5564
5956 5565 for (i = 0; i < mpt->m_num_phys; i++) {
5957 5566 phy_mask = 0x00;
5958 5567
5959 5568 if (mpt->m_phy_info[i].attached_devhdl == 0)
5960 5569 continue;
5961 5570
5962 5571 bzero(phy_mask_name, sizeof (phy_mask_name));
5963 5572
5964 5573 current_port = mpt->m_phy_info[i].port_num;
5965 5574
5966 5575 if ((mask & (1 << i)) != 0)
5967 5576 continue;
5968 5577
5969 5578 for (j = 0; j < mpt->m_num_phys; j++) {
5970 5579 if (mpt->m_phy_info[j].attached_devhdl &&
5971 5580 (mpt->m_phy_info[j].port_num == current_port)) {
5972 5581 phy_mask |= (1 << j);
5973 5582 }
5974 5583 }
5975 5584 mask = mask | phy_mask;
5976 5585
5977 5586 for (j = 0; j < mpt->m_num_phys; j++) {
5978 5587 if ((phy_mask >> j) & 0x01) {
5979 5588 mpt->m_phy_info[j].phy_mask = phy_mask;
5980 5589 }
5981 5590 }
5982 5591
5983 5592 (void) sprintf(phy_mask_name, "%x", phy_mask);
5984 5593
5985 5594 mutex_exit(&mpt->m_mutex);
5986 5595 /*
5987 5596 * register a iport, if the port has already been existed
5988 5597 * SCSA will do nothing and just return.
5989 5598 */
5990 5599 (void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
5991 5600 mutex_enter(&mpt->m_mutex);
5992 5601 }
5993 5602 kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
5994 5603 NDBG20(("mptsas%d update phymask return", mpt->m_instance));
5995 5604 }
5996 5605
/*
 * mptsas_handle_dr is a task handler for DR; the DR actions include:
 * 1. Directly attached device added/removed.
 * 2. Expander device added/removed.
 * 3. Indirectly (expander-) attached device added/removed.
 * 4. Status change of LUNs on an existing device.
 * 5. RAID volume created/deleted.
 * 6. Member of RAID volume released because of RAID deletion.
 * 7. Physical disks removed because of RAID creation.
 */
static void
mptsas_handle_dr(void *args) {
	mptsas_topo_change_list_t	*topo_node = NULL;
	mptsas_topo_change_list_t	*save_node = NULL;
	mptsas_t			*mpt;
	dev_info_t			*parent = NULL;
	mptsas_phymask_t		phymask = 0;
	char				*phy_mask_name;
	uint8_t				flags = 0, physport = 0xff;
	uint8_t				port_update = 0;
	uint_t				event;

	topo_node = (mptsas_topo_change_list_t *)args;

	mpt = topo_node->mpt;
	event = topo_node->event;
	flags = topo_node->flags;

	/* Scratch iport-name buffer; freed at the "out" label below. */
	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);

	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));

	/*
	 * Decide whether this event class can change the set of initiator
	 * ports (and hence require a phymask refresh below).
	 */
	switch (event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Direct attached or expander attached device added
			 * into system or a Phys Disk that is being unhidden.
			 */
			port_update = 1;
		}
		break;
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
		/*
		 * New expander added into system, it must be the head
		 * of topo_change_list_t
		 */
		port_update = 1;
		break;
	default:
		port_update = 0;
		break;
	}
	/*
	 * All cases port_update == 1 may cause initiator port form change
	 */
	mutex_enter(&mpt->m_mutex);
	if (mpt->m_port_chng && port_update) {
		/*
		 * mpt->m_port_chng flag indicates some PHYs of initiator
		 * port have changed to online. So when expander added or
		 * directly attached device online events come, we force to
		 * update port information by issuing SAS IO Unit Page and
		 * update PHYMASKs.
		 */
		(void) mptsas_update_phymask(mpt);
		mpt->m_port_chng = 0;

	}
	mutex_exit(&mpt->m_mutex);
	/* Walk the topology change list; each node is freed as it is done. */
	while (topo_node) {
		phymask = 0;
		if (parent == NULL) {
			physport = topo_node->un.physport;
			event = topo_node->event;
			flags = topo_node->flags;
			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
				/*
				 * For all offline events, phymask is known
				 */
				phymask = topo_node->un.phymask;
				goto find_parent;
			}
			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
				goto handle_topo_change;
			}
			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
				phymask = topo_node->un.phymask;
				goto find_parent;
			}

			if ((flags ==
			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
				/*
				 * There is no any field in IR_CONFIG_CHANGE
				 * event indicate physport/phynum, let's get
				 * parent after SAS Device Page0 request.
				 */
				goto handle_topo_change;
			}

			mutex_enter(&mpt->m_mutex);
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				/*
				 * If the direct attached device added or a
				 * phys disk is being unhidden, argument
				 * physport actually is PHY#, so we have to get
				 * phymask according PHY#.
				 */
				physport = mpt->m_phy_info[physport].port_num;
			}

			/*
			 * Translate physport to phymask so that we can search
			 * parent dip.
			 */
			phymask = mptsas_physport_to_phymask(mpt,
			    physport);
			mutex_exit(&mpt->m_mutex);

find_parent:
			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
			/*
			 * For RAID topology change node, write the iport name
			 * as v0.
			 */
			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
				(void) sprintf(phy_mask_name, "v0");
			} else {
				/*
				 * phymask can be 0 if the drive has been
				 * pulled by the time an add event is
				 * processed. If phymask is 0, just skip this
				 * event and continue.
				 */
				if (phymask == 0) {
					mutex_enter(&mpt->m_mutex);
					save_node = topo_node;
					topo_node = topo_node->next;
					ASSERT(save_node);
					kmem_free(save_node,
					    sizeof (mptsas_topo_change_list_t));
					mutex_exit(&mpt->m_mutex);

					parent = NULL;
					continue;
				}
				(void) sprintf(phy_mask_name, "%x", phymask);
			}
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find an "
				    "iport, should not happen!");
				goto out;
			}

		}
		ASSERT(parent);
handle_topo_change:

		mutex_enter(&mpt->m_mutex);
		/*
		 * If HBA is being reset, don't perform operations depending
		 * on the IOC. We must free the topo list, however.
		 */
		if (!mpt->m_in_reset)
			mptsas_handle_topo_change(topo_node, parent);
		else
			NDBG20(("skipping topo change received during reset"));
		/* Unlink and free the node we just processed (or skipped). */
		save_node = topo_node;
		topo_node = topo_node->next;
		ASSERT(save_node);
		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
		mutex_exit(&mpt->m_mutex);

		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
			/*
			 * If direct attached device associated, make sure
			 * reset the parent before start the next one. But
			 * all devices associated with expander shares the
			 * parent. Also, reset parent if this is for RAID.
			 */
			parent = NULL;
		}
	}
out:
	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
}
6186 5801
static void
mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
    dev_info_t *parent)
{
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp = NULL;
	mptsas_t	*mpt = (void *)topo_node->mpt;
	uint16_t	devhdl;
	uint16_t	attached_devhdl;
	uint64_t	sas_wwn = 0;
	int		rval = 0;
	uint32_t	page_address;
	uint8_t	phy, flags;
	char	*addr = NULL;
	dev_info_t *lundip;
	int	circ = 0, circ1 = 0;
	char attached_wwnstr[MPTSAS_WWN_STRLEN];

	NDBG20(("mptsas%d handle_topo_change enter", mpt->m_instance));

	/*
	 * Entered and left with m_mutex held, but the mutex is dropped
	 * around NDI/SCSA calls in each case below.
	 */
	ASSERT(mutex_owned(&mpt->m_mutex));

	switch (topo_node->event) {
	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
	{
		char *phy_mask_name;
		mptsas_phymask_t phymask = 0;

		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			/*
			 * Get latest RAID info.
			 */
			(void) mptsas_get_raid_info(mpt);
			ptgt = mptsas_search_by_devhdl(
			    &mpt->m_active->m_tgttbl, topo_node->devhdl);
			if (ptgt == NULL)
				break;
		} else {
			ptgt = (void *)topo_node->object;
		}

		if (ptgt == NULL) {
			/*
			 * If a Phys Disk was deleted, RAID info needs to be
			 * updated to reflect the new topology.
			 */
			(void) mptsas_get_raid_info(mpt);

			/*
			 * Get sas device page 0 by DevHandle to make sure if
			 * SSP/SATA end device exist.
			 */
			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
			    topo_node->devhdl;

			rval = mptsas_get_target_device_info(mpt, page_address,
			    &devhdl, &ptgt);
			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: target %d is "
				    "not a SAS/SATA device. \n",
				    topo_node->devhdl);
			} else if (rval == DEV_INFO_FAIL_ALLOC) {
				mptsas_log(mpt, CE_NOTE,
				    "mptsas_handle_topo_change: could not "
				    "allocate memory. \n");
			}
			/*
			 * If rval is DEV_INFO_PHYS_DISK then there is nothing
			 * else to do, just leave.
			 */
			if (rval != DEV_INFO_SUCCESS) {
				return;
			}
		}

		ASSERT(ptgt->m_devhdl == topo_node->devhdl);

		mutex_exit(&mpt->m_mutex);
		flags = topo_node->flags;

		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
			/* A RAID phys disk: locate its iport by phymask. */
			phymask = ptgt->m_phymask;
			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
			(void) sprintf(phy_mask_name, "%x", phymask);
			parent = scsi_hba_iport_find(mpt->m_dip,
			    phy_mask_name);
			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
			if (parent == NULL) {
				mptsas_log(mpt, CE_WARN, "Failed to find a "
				    "iport for PD, should not happen!");
				mutex_enter(&mpt->m_mutex);
				break;
			}
		}

		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
			ndi_devi_enter(parent, &circ1);
			(void) mptsas_config_raid(parent, topo_node->devhdl,
			    &lundip);
			ndi_devi_exit(parent, circ1);
		} else {
			/*
			 * hold nexus for bus configure
			 */
			ndi_devi_enter(scsi_vhci_dip, &circ);
			ndi_devi_enter(parent, &circ1);
			rval = mptsas_config_target(parent, ptgt);
			/*
			 * release nexus for bus configure
			 */
			ndi_devi_exit(parent, circ1);
			ndi_devi_exit(scsi_vhci_dip, circ);

			/*
			 * Add parent's props for SMHBA support
			 */
			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
				bzero(attached_wwnstr,
				    sizeof (attached_wwnstr));
				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
				    ptgt->m_sas_wwn);
				if (ddi_prop_update_string(DDI_DEV_T_NONE,
				    parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT,
				    attached_wwnstr)
				    != DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent,
					    SCSI_ADDR_PROP_ATTACHED_PORT);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    "attached-port props");
					return;
				}
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS, 1) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_NUM_PHYS);
					mptsas_log(mpt, CE_WARN, "Failed to"
					    " create num-phys props");
					return;
				}

				/*
				 * Update PHY info for smhba
				 */
				mutex_enter(&mpt->m_mutex);
				if (mptsas_smhba_phy_init(mpt)) {
					mutex_exit(&mpt->m_mutex);
					mptsas_log(mpt, CE_WARN, "mptsas phy"
					    " update failed");
					return;
				}
				mutex_exit(&mpt->m_mutex);
				mptsas_smhba_set_phy_props(mpt,
				    ddi_get_name_addr(parent), parent,
				    1, &attached_devhdl);
				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT, 0) !=
				    DDI_PROP_SUCCESS) {
					(void) ddi_prop_remove(DDI_DEV_T_NONE,
					    parent, MPTSAS_VIRTUAL_PORT);
					mptsas_log(mpt, CE_WARN,
					    "mptsas virtual-port"
					    "port prop update failed");
					return;
				}
			}
		}
		mutex_enter(&mpt->m_mutex);

		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
		    ptgt->m_phymask));
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
	{
		mptsas_hash_table_t *tgttbl = &mpt->m_active->m_tgttbl;
		devhdl = topo_node->devhdl;
		ptgt = mptsas_search_by_devhdl(tgttbl, devhdl);
		if (ptgt == NULL)
			break;

		sas_wwn = ptgt->m_sas_wwn;
		phy = ptgt->m_phynum;

		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);

		/* WWN-addressed if we have one, otherwise PHY-addressed. */
		if (sas_wwn) {
			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
		} else {
			(void) sprintf(addr, "p%x", phy);
		}
		ASSERT(ptgt->m_devhdl == devhdl);

		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
		    (topo_node->flags ==
		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
			/*
			 * Get latest RAID info if RAID volume status changes
			 * or Phys Disk status changes
			 */
			(void) mptsas_get_raid_info(mpt);
		}
		/*
		 * Abort all outstanding command on the device
		 */
		rval = mptsas_do_scsi_reset(mpt, devhdl);
		if (rval) {
			NDBG20(("mptsas%d handle_topo_change to reset target "
			    "before offline devhdl:%x, phymask:%x, rval:%x",
			    mpt->m_instance, ptgt->m_devhdl, ptgt->m_phymask,
			    rval));
		}

		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(scsi_vhci_dip, &circ);
		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_target(parent, addr);
		ndi_devi_exit(parent, circ1);
		ndi_devi_exit(scsi_vhci_dip, circ);
		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
		    "phymask:%x, rval:%x", mpt->m_instance,
		    ptgt->m_devhdl, ptgt->m_phymask, rval));

		kmem_free(addr, SCSI_MAXNAMELEN);

		/*
		 * Clear parent's props for SMHBA support
		 */
		flags = topo_node->flags;
		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys "
				    "prop update failed");
				break;
			}
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				break;
			}
		}

		mutex_enter(&mpt->m_mutex);
		if (rval == DDI_SUCCESS) {
			/* Offline succeeded; drop the target entirely. */
			mptsas_tgt_free(&mpt->m_active->m_tgttbl,
			    ptgt->m_sas_wwn, ptgt->m_phymask);
			ptgt = NULL;
		} else {
			/*
			 * clean DR_INTRANSITION flag to allow I/O down to
			 * PHCI driver since failover finished.
			 * Invalidate the devhdl
			 */
			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
			ptgt->m_tgt_unconfigured = 0;
			/* m_dr_flag is read under m_tx_waitq_mutex. */
			mutex_enter(&mpt->m_tx_waitq_mutex);
			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
			mutex_exit(&mpt->m_tx_waitq_mutex);
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
			rval = mptsas_free_devhdl(mpt, devhdl);

			NDBG20(("mptsas%d handle_topo_change to remove "
			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
			    rval));
		}

		break;
	}
	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
	{
		devhdl = topo_node->devhdl;
		/*
		 * If this is the remove handle event, do a reset first.
		 */
		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
			rval = mptsas_do_scsi_reset(mpt, devhdl);
			if (rval) {
				NDBG20(("mpt%d reset target before remove "
				    "devhdl:%x, rval:%x", mpt->m_instance,
				    devhdl, rval));
			}
		}

		/*
		 * Send SAS IO Unit Control to free the dev handle
		 */
		rval = mptsas_free_devhdl(mpt, devhdl);
		NDBG20(("mptsas%d handle_topo_change to remove "
		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
		    rval));
		break;
	}
	case MPTSAS_DR_EVENT_RECONFIG_SMP:
	{
		mptsas_smp_t smp;
		dev_info_t *smpdip;
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;

		devhdl = topo_node->devhdl;

		/* Fetch expander page 0 for the new SMP device. */
		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
		if (rval != DDI_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "failed to online smp, "
			    "handle %x", devhdl);
			return;
		}

		psmp = mptsas_smp_alloc(smptbl, &smp);
		if (psmp == NULL) {
			return;
		}

		mutex_exit(&mpt->m_mutex);
		ndi_devi_enter(parent, &circ1);
		(void) mptsas_online_smp(parent, psmp, &smpdip);
		ndi_devi_exit(parent, circ1);

		mutex_enter(&mpt->m_mutex);
		break;
	}
	case MPTSAS_DR_EVENT_OFFLINE_SMP:
	{
		mptsas_hash_table_t *smptbl = &mpt->m_active->m_smptbl;
		devhdl = topo_node->devhdl;
		uint32_t dev_info;

		psmp = mptsas_search_by_devhdl(smptbl, devhdl);
		if (psmp == NULL)
			break;
		/*
		 * The mptsas_smp_t data is released only if the dip is offlined
		 * successfully.
		 */
		mutex_exit(&mpt->m_mutex);

		ndi_devi_enter(parent, &circ1);
		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
		ndi_devi_exit(parent, circ1);

		dev_info = psmp->m_deviceinfo;
		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
		    DEVINFO_DIRECT_ATTACHED) {
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_VIRTUAL_PORT, 1) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_VIRTUAL_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
				    "prop update failed");
				return;
			}
			/*
			 * Check whether the smp connected to the iport,
			 */
			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
			    MPTSAS_NUM_PHYS, 0) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    MPTSAS_NUM_PHYS);
				mptsas_log(mpt, CE_WARN, "mptsas num phys"
				    "prop update failed");
				return;
			}
			/*
			 * Clear parent's attached-port props
			 */
			bzero(attached_wwnstr, sizeof (attached_wwnstr));
			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
			    DDI_PROP_SUCCESS) {
				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
				    SCSI_ADDR_PROP_ATTACHED_PORT);
				mptsas_log(mpt, CE_WARN, "mptsas attached port "
				    "prop update failed");
				return;
			}
		}

		mutex_enter(&mpt->m_mutex);
		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
		if (rval == DDI_SUCCESS) {
			mptsas_smp_free(smptbl, psmp->m_sasaddr,
			    psmp->m_phymask);
		} else {
			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
		}

		bzero(attached_wwnstr, sizeof (attached_wwnstr));

		break;
	}
	default:
		return;
	}
}
6619 6230
6620 6231 /*
6621 6232 * Record the event if its type is enabled in mpt instance by ioctl.
6622 6233 */
6623 6234 static void
6624 6235 mptsas_record_event(void *args)
6625 6236 {
6626 6237 m_replyh_arg_t *replyh_arg;
6627 6238 pMpi2EventNotificationReply_t eventreply;
6628 6239 uint32_t event, rfm;
6629 6240 mptsas_t *mpt;
6630 6241 int i, j;
6631 6242 uint16_t event_data_len;
6632 6243 boolean_t sendAEN = FALSE;
6633 6244
6634 6245 replyh_arg = (m_replyh_arg_t *)args;
6635 6246 rfm = replyh_arg->rfm;
6636 6247 mpt = replyh_arg->mpt;
6637 6248
6638 6249 eventreply = (pMpi2EventNotificationReply_t)
6639 6250 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6640 6251 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6641 6252
6642 6253
6643 6254 /*
6644 6255 * Generate a system event to let anyone who cares know that a
6645 6256 * LOG_ENTRY_ADDED event has occurred. This is sent no matter what the
6646 6257 * event mask is set to.
6647 6258 */
6648 6259 if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
6649 6260 sendAEN = TRUE;
6650 6261 }
6651 6262
6652 6263 /*
6653 6264 * Record the event only if it is not masked. Determine which dword
6654 6265 * and bit of event mask to test.
6655 6266 */
6656 6267 i = (uint8_t)(event / 32);
6657 6268 j = (uint8_t)(event % 32);
6658 6269 if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
6659 6270 i = mpt->m_event_index;
6660 6271 mpt->m_events[i].Type = event;
6661 6272 mpt->m_events[i].Number = ++mpt->m_event_number;
6662 6273 bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
6663 6274 event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
6664 6275 &eventreply->EventDataLength);
6665 6276
6666 6277 if (event_data_len > 0) {
6667 6278 /*
6668 6279 * Limit data to size in m_event entry
6669 6280 */
6670 6281 if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
6671 6282 event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
6672 6283 }
6673 6284 for (j = 0; j < event_data_len; j++) {
6674 6285 mpt->m_events[i].Data[j] =
6675 6286 ddi_get32(mpt->m_acc_reply_frame_hdl,
6676 6287 &(eventreply->EventData[j]));
6677 6288 }
6678 6289
6679 6290 /*
6680 6291 * check for index wrap-around
6681 6292 */
6682 6293 if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
6683 6294 i = 0;
6684 6295 }
6685 6296 mpt->m_event_index = (uint8_t)i;
6686 6297
6687 6298 /*
6688 6299 * Set flag to send the event.
6689 6300 */
6690 6301 sendAEN = TRUE;
6691 6302 }
6692 6303 }
6693 6304
6694 6305 /*
6695 6306 * Generate a system event if flag is set to let anyone who cares know
6696 6307 * that an event has occurred.
6697 6308 */
6698 6309 if (sendAEN) {
6699 6310 (void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
6700 6311 "SAS", NULL, NULL, DDI_NOSLEEP);
6701 6312 }
6702 6313 }
6703 6314
6704 6315 #define SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
6705 6316 /*
6706 6317 * handle sync events from ioc in interrupt
6707 6318 * return value:
6708 6319 * DDI_SUCCESS: The event is handled by this func
6709 6320 * DDI_FAILURE: Event is not handled
6710 6321 */
6711 6322 static int
6712 6323 mptsas_handle_event_sync(void *args)
6713 6324 {
6714 6325 m_replyh_arg_t *replyh_arg;
6715 6326 pMpi2EventNotificationReply_t eventreply;
6716 6327 uint32_t event, rfm;
6717 6328 mptsas_t *mpt;
6718 6329 uint_t iocstatus;
6719 6330
6720 6331 replyh_arg = (m_replyh_arg_t *)args;
6721 6332 rfm = replyh_arg->rfm;
6722 6333 mpt = replyh_arg->mpt;
6723 6334
6724 6335 ASSERT(mutex_owned(&mpt->m_mutex));
6725 6336
6726 6337 eventreply = (pMpi2EventNotificationReply_t)
6727 6338 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
6728 6339 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
6729 6340
6730 6341 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
6731 6342 &eventreply->IOCStatus)) {
6732 6343 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
6733 6344 mptsas_log(mpt, CE_WARN,
6734 6345 "!mptsas_handle_event_sync: IOCStatus=0x%x, "
6735 6346 "IOCLogInfo=0x%x", iocstatus,
6736 6347 ddi_get32(mpt->m_acc_reply_frame_hdl,
6737 6348 &eventreply->IOCLogInfo));
6738 6349 } else {
6739 6350 mptsas_log(mpt, CE_WARN,
6740 6351 "mptsas_handle_event_sync: IOCStatus=0x%x, "
6741 6352 "IOCLogInfo=0x%x", iocstatus,
6742 6353 ddi_get32(mpt->m_acc_reply_frame_hdl,
6743 6354 &eventreply->IOCLogInfo));
6744 6355 }
6745 6356 }
6746 6357
6747 6358 /*
6748 6359 * figure out what kind of event we got and handle accordingly
6749 6360 */
6750 6361 switch (event) {
6751 6362 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
6752 6363 {
6753 6364 pMpi2EventDataSasTopologyChangeList_t sas_topo_change_list;
6754 6365 uint8_t num_entries, expstatus, phy;
6755 6366 uint8_t phystatus, physport, state, i;
6756 6367 uint8_t start_phy_num, link_rate;
6757 6368 uint16_t dev_handle, reason_code;
6758 6369 uint16_t enc_handle, expd_handle;
6759 6370 char string[80], curr[80], prev[80];
6760 6371 mptsas_topo_change_list_t *topo_head = NULL;
6761 6372 mptsas_topo_change_list_t *topo_tail = NULL;
6762 6373 mptsas_topo_change_list_t *topo_node = NULL;
6763 6374 mptsas_target_t *ptgt;
6764 6375 mptsas_smp_t *psmp;
6765 6376 mptsas_hash_table_t *tgttbl, *smptbl;
6766 6377 uint8_t flags = 0, exp_flag;
6767 6378 smhba_info_t *pSmhba = NULL;
6768 6379
6769 6380 NDBG20(("mptsas_handle_event_sync: SAS topology change"));
6770 6381
6771 6382 tgttbl = &mpt->m_active->m_tgttbl;
6772 6383 smptbl = &mpt->m_active->m_smptbl;
6773 6384
6774 6385 sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
6775 6386 eventreply->EventData;
6776 6387
6777 6388 enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6778 6389 &sas_topo_change_list->EnclosureHandle);
6779 6390 expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6780 6391 &sas_topo_change_list->ExpanderDevHandle);
6781 6392 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
6782 6393 &sas_topo_change_list->NumEntries);
6783 6394 start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
6784 6395 &sas_topo_change_list->StartPhyNum);
6785 6396 expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6786 6397 &sas_topo_change_list->ExpStatus);
6787 6398 physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
6788 6399 &sas_topo_change_list->PhysicalPort);
6789 6400
6790 6401 string[0] = 0;
6791 6402 if (expd_handle) {
6792 6403 flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
6793 6404 switch (expstatus) {
6794 6405 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6795 6406 (void) sprintf(string, " added");
6796 6407 /*
6797 6408 * New expander device added
6798 6409 */
6799 6410 mpt->m_port_chng = 1;
6800 6411 topo_node = kmem_zalloc(
6801 6412 sizeof (mptsas_topo_change_list_t),
6802 6413 KM_SLEEP);
6803 6414 topo_node->mpt = mpt;
6804 6415 topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
6805 6416 topo_node->un.physport = physport;
6806 6417 topo_node->devhdl = expd_handle;
6807 6418 topo_node->flags = flags;
6808 6419 topo_node->object = NULL;
6809 6420 if (topo_head == NULL) {
6810 6421 topo_head = topo_tail = topo_node;
6811 6422 } else {
6812 6423 topo_tail->next = topo_node;
6813 6424 topo_tail = topo_node;
6814 6425 }
6815 6426 break;
6816 6427 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6817 6428 (void) sprintf(string, " not responding, "
6818 6429 "removed");
6819 6430 psmp = mptsas_search_by_devhdl(smptbl,
6820 6431 expd_handle);
6821 6432 if (psmp == NULL)
6822 6433 break;
6823 6434
6824 6435 topo_node = kmem_zalloc(
6825 6436 sizeof (mptsas_topo_change_list_t),
6826 6437 KM_SLEEP);
6827 6438 topo_node->mpt = mpt;
6828 6439 topo_node->un.phymask = psmp->m_phymask;
6829 6440 topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
6830 6441 topo_node->devhdl = expd_handle;
6831 6442 topo_node->flags = flags;
6832 6443 topo_node->object = NULL;
6833 6444 if (topo_head == NULL) {
6834 6445 topo_head = topo_tail = topo_node;
6835 6446 } else {
6836 6447 topo_tail->next = topo_node;
6837 6448 topo_tail = topo_node;
6838 6449 }
6839 6450 break;
6840 6451 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6841 6452 break;
6842 6453 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6843 6454 (void) sprintf(string, " not responding, "
6844 6455 "delaying removal");
6845 6456 break;
6846 6457 default:
6847 6458 break;
6848 6459 }
6849 6460 } else {
6850 6461 flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
6851 6462 }
6852 6463
6853 6464 NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
6854 6465 enc_handle, expd_handle, string));
6855 6466 for (i = 0; i < num_entries; i++) {
6856 6467 phy = i + start_phy_num;
6857 6468 phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
6858 6469 &sas_topo_change_list->PHY[i].PhyStatus);
6859 6470 dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
6860 6471 &sas_topo_change_list->PHY[i].AttachedDevHandle);
6861 6472 reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
6862 6473 /*
6863 6474 * Filter out processing of Phy Vacant Status unless
6864 6475 * the reason code is "Not Responding". Process all
6865 6476 * other combinations of Phy Status and Reason Codes.
6866 6477 */
6867 6478 if ((phystatus &
6868 6479 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
6869 6480 (reason_code !=
6870 6481 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
6871 6482 continue;
6872 6483 }
6873 6484 curr[0] = 0;
6874 6485 prev[0] = 0;
6875 6486 string[0] = 0;
6876 6487 switch (reason_code) {
6877 6488 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6878 6489 {
6879 6490 NDBG20(("mptsas%d phy %d physical_port %d "
6880 6491 "dev_handle %d added", mpt->m_instance, phy,
6881 6492 physport, dev_handle));
6882 6493 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
6883 6494 &sas_topo_change_list->PHY[i].LinkRate);
6884 6495 state = (link_rate &
6885 6496 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
6886 6497 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
6887 6498 switch (state) {
6888 6499 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
6889 6500 (void) sprintf(curr, "is disabled");
6890 6501 break;
6891 6502 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
6892 6503 (void) sprintf(curr, "is offline, "
6893 6504 "failed speed negotiation");
6894 6505 break;
6895 6506 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
6896 6507 (void) sprintf(curr, "SATA OOB "
6897 6508 "complete");
6898 6509 break;
6899 6510 case SMP_RESET_IN_PROGRESS:
6900 6511 (void) sprintf(curr, "SMP reset in "
6901 6512 "progress");
6902 6513 break;
6903 6514 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
6904 6515 (void) sprintf(curr, "is online at "
6905 6516 "1.5 Gbps");
6906 6517 break;
6907 6518 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
6908 6519 (void) sprintf(curr, "is online at 3.0 "
6909 6520 "Gbps");
6910 6521 break;
6911 6522 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
6912 6523 (void) sprintf(curr, "is online at 6.0 "
6913 6524 "Gbps");
6914 6525 break;
6915 6526 default:
6916 6527 (void) sprintf(curr, "state is "
6917 6528 "unknown");
6918 6529 break;
6919 6530 }
6920 6531 /*
6921 6532 * New target device added into the system.
6922 6533 * Set association flag according to if an
6923 6534 * expander is used or not.
6924 6535 */
6925 6536 exp_flag =
6926 6537 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6927 6538 if (flags ==
6928 6539 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6929 6540 flags = exp_flag;
6930 6541 }
6931 6542 topo_node = kmem_zalloc(
6932 6543 sizeof (mptsas_topo_change_list_t),
6933 6544 KM_SLEEP);
6934 6545 topo_node->mpt = mpt;
6935 6546 topo_node->event =
6936 6547 MPTSAS_DR_EVENT_RECONFIG_TARGET;
6937 6548 if (expd_handle == 0) {
6938 6549 /*
6939 6550 * Per MPI 2, if expander dev handle
6940 6551 * is 0, it's a directly attached
6941 6552 * device. So driver use PHY to decide
6942 6553 * which iport is associated
6943 6554 */
6944 6555 physport = phy;
6945 6556 mpt->m_port_chng = 1;
6946 6557 }
6947 6558 topo_node->un.physport = physport;
6948 6559 topo_node->devhdl = dev_handle;
6949 6560 topo_node->flags = flags;
6950 6561 topo_node->object = NULL;
6951 6562 if (topo_head == NULL) {
6952 6563 topo_head = topo_tail = topo_node;
6953 6564 } else {
6954 6565 topo_tail->next = topo_node;
6955 6566 topo_tail = topo_node;
6956 6567 }
6957 6568 break;
6958 6569 }
6959 6570 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6960 6571 {
6961 6572 NDBG20(("mptsas%d phy %d physical_port %d "
6962 6573 "dev_handle %d removed", mpt->m_instance,
6963 6574 phy, physport, dev_handle));
6964 6575 /*
6965 6576 * Set association flag according to if an
6966 6577 * expander is used or not.
6967 6578 */
6968 6579 exp_flag =
6969 6580 MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
6970 6581 if (flags ==
6971 6582 MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
6972 6583 flags = exp_flag;
6973 6584 }
6974 6585 /*
6975 6586 * Target device is removed from the system
6976 6587 			 * Before the device is really offline from
6977 6588 			 * the system.
6978 6589 */
6979 6590 ptgt = mptsas_search_by_devhdl(tgttbl,
6980 6591 dev_handle);
6981 6592 /*
6982 6593 * If ptgt is NULL here, it means that the
6983 6594 * DevHandle is not in the hash table. This is
6984 6595 * reasonable sometimes. For example, if a
6985 6596 * disk was pulled, then added, then pulled
6986 6597 * again, the disk will not have been put into
6987 6598 * the hash table because the add event will
6988 6599 * have an invalid phymask. BUT, this does not
6989 6600 * mean that the DevHandle is invalid. The
6990 6601 * controller will still have a valid DevHandle
6991 6602 * that must be removed. To do this, use the
6992 6603 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
6993 6604 */
6994 6605 if (ptgt == NULL) {
6995 6606 topo_node = kmem_zalloc(
6996 6607 sizeof (mptsas_topo_change_list_t),
6997 6608 KM_SLEEP);
6998 6609 topo_node->mpt = mpt;
6999 6610 topo_node->un.phymask = 0;
7000 6611 topo_node->event =
7001 6612 MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7002 6613 topo_node->devhdl = dev_handle;
7003 6614 topo_node->flags = flags;
7004 6615 topo_node->object = NULL;
7005 6616 if (topo_head == NULL) {
7006 6617 topo_head = topo_tail =
7007 6618 topo_node;
↓ open down ↓ |
525 lines elided |
↑ open up ↑ |
7008 6619 } else {
7009 6620 topo_tail->next = topo_node;
7010 6621 topo_tail = topo_node;
7011 6622 }
7012 6623 break;
7013 6624 }
7014 6625
7015 6626 /*
7016 6627 * Update DR flag immediately avoid I/O failure
7017 6628 * before failover finish. Pay attention to the
7018 - * mutex protect, we need grab the per target
7019 - * mutex during set m_dr_flag because the
7020 - * m_mutex would not be held all the time in
7021 - * mptsas_scsi_start().
6629 + * mutex protect, we need grab m_tx_waitq_mutex
6630 + * during set m_dr_flag because we won't add
6631 + * the following command into waitq, instead,
6632 + * we need return TRAN_BUSY in the tran_start
6633 + * context.
7022 6634 */
7023 - mutex_enter(&ptgt->m_tgt_intr_mutex);
6635 + mutex_enter(&mpt->m_tx_waitq_mutex);
7024 6636 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7025 - mutex_exit(&ptgt->m_tgt_intr_mutex);
6637 + mutex_exit(&mpt->m_tx_waitq_mutex);
7026 6638
7027 6639 topo_node = kmem_zalloc(
7028 6640 sizeof (mptsas_topo_change_list_t),
7029 6641 KM_SLEEP);
7030 6642 topo_node->mpt = mpt;
7031 6643 topo_node->un.phymask = ptgt->m_phymask;
7032 6644 topo_node->event =
7033 6645 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7034 6646 topo_node->devhdl = dev_handle;
7035 6647 topo_node->flags = flags;
7036 6648 topo_node->object = NULL;
7037 6649 if (topo_head == NULL) {
7038 6650 topo_head = topo_tail = topo_node;
7039 6651 } else {
7040 6652 topo_tail->next = topo_node;
7041 6653 topo_tail = topo_node;
7042 6654 }
7043 6655 break;
7044 6656 }
7045 6657 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7046 6658 link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7047 6659 &sas_topo_change_list->PHY[i].LinkRate);
7048 6660 state = (link_rate &
7049 6661 MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7050 6662 MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7051 6663 pSmhba = &mpt->m_phy_info[i].smhba_info;
7052 6664 pSmhba->negotiated_link_rate = state;
7053 6665 switch (state) {
7054 6666 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7055 6667 (void) sprintf(curr, "is disabled");
7056 6668 mptsas_smhba_log_sysevent(mpt,
7057 6669 ESC_SAS_PHY_EVENT,
7058 6670 SAS_PHY_REMOVE,
7059 6671 &mpt->m_phy_info[i].smhba_info);
7060 6672 mpt->m_phy_info[i].smhba_info.
7061 6673 negotiated_link_rate
7062 6674 = 0x1;
7063 6675 break;
7064 6676 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7065 6677 (void) sprintf(curr, "is offline, "
7066 6678 "failed speed negotiation");
7067 6679 mptsas_smhba_log_sysevent(mpt,
7068 6680 ESC_SAS_PHY_EVENT,
7069 6681 SAS_PHY_OFFLINE,
7070 6682 &mpt->m_phy_info[i].smhba_info);
7071 6683 break;
7072 6684 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7073 6685 (void) sprintf(curr, "SATA OOB "
7074 6686 "complete");
7075 6687 break;
7076 6688 case SMP_RESET_IN_PROGRESS:
7077 6689 (void) sprintf(curr, "SMP reset in "
7078 6690 "progress");
7079 6691 break;
7080 6692 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7081 6693 (void) sprintf(curr, "is online at "
7082 6694 "1.5 Gbps");
7083 6695 if ((expd_handle == 0) &&
7084 6696 (enc_handle == 1)) {
7085 6697 mpt->m_port_chng = 1;
7086 6698 }
7087 6699 mptsas_smhba_log_sysevent(mpt,
7088 6700 ESC_SAS_PHY_EVENT,
7089 6701 SAS_PHY_ONLINE,
7090 6702 &mpt->m_phy_info[i].smhba_info);
7091 6703 break;
7092 6704 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7093 6705 (void) sprintf(curr, "is online at 3.0 "
7094 6706 "Gbps");
7095 6707 if ((expd_handle == 0) &&
7096 6708 (enc_handle == 1)) {
7097 6709 mpt->m_port_chng = 1;
7098 6710 }
7099 6711 mptsas_smhba_log_sysevent(mpt,
7100 6712 ESC_SAS_PHY_EVENT,
7101 6713 SAS_PHY_ONLINE,
7102 6714 &mpt->m_phy_info[i].smhba_info);
7103 6715 break;
7104 6716 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7105 6717 (void) sprintf(curr, "is online at "
7106 6718 "6.0 Gbps");
7107 6719 if ((expd_handle == 0) &&
7108 6720 (enc_handle == 1)) {
7109 6721 mpt->m_port_chng = 1;
7110 6722 }
7111 6723 mptsas_smhba_log_sysevent(mpt,
7112 6724 ESC_SAS_PHY_EVENT,
7113 6725 SAS_PHY_ONLINE,
7114 6726 &mpt->m_phy_info[i].smhba_info);
7115 6727 break;
7116 6728 default:
7117 6729 (void) sprintf(curr, "state is "
7118 6730 "unknown");
7119 6731 break;
7120 6732 }
7121 6733
7122 6734 state = (link_rate &
7123 6735 MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
7124 6736 MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
7125 6737 switch (state) {
7126 6738 case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7127 6739 (void) sprintf(prev, ", was disabled");
7128 6740 break;
7129 6741 case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7130 6742 (void) sprintf(prev, ", was offline, "
7131 6743 "failed speed negotiation");
7132 6744 break;
7133 6745 case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7134 6746 (void) sprintf(prev, ", was SATA OOB "
7135 6747 "complete");
7136 6748 break;
7137 6749 case SMP_RESET_IN_PROGRESS:
7138 6750 (void) sprintf(prev, ", was SMP reset "
7139 6751 "in progress");
7140 6752 break;
7141 6753 case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7142 6754 (void) sprintf(prev, ", was online at "
7143 6755 "1.5 Gbps");
7144 6756 break;
7145 6757 case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7146 6758 (void) sprintf(prev, ", was online at "
7147 6759 "3.0 Gbps");
7148 6760 break;
7149 6761 case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7150 6762 (void) sprintf(prev, ", was online at "
7151 6763 "6.0 Gbps");
7152 6764 break;
7153 6765 default:
7154 6766 break;
7155 6767 }
7156 6768 (void) sprintf(&string[strlen(string)], "link "
7157 6769 "changed, ");
7158 6770 break;
7159 6771 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7160 6772 continue;
7161 6773 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7162 6774 (void) sprintf(&string[strlen(string)],
7163 6775 "target not responding, delaying "
7164 6776 "removal");
7165 6777 break;
7166 6778 }
7167 6779 NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7168 6780 mpt->m_instance, phy, dev_handle, string, curr,
7169 6781 prev));
7170 6782 }
7171 6783 if (topo_head != NULL) {
7172 6784 /*
7173 6785 * Launch DR taskq to handle topology change
7174 6786 */
7175 6787 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7176 6788 mptsas_handle_dr, (void *)topo_head,
7177 6789 DDI_NOSLEEP)) != DDI_SUCCESS) {
7178 6790 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7179 6791 "for handle SAS DR event failed. \n");
7180 6792 }
7181 6793 }
7182 6794 break;
7183 6795 }
7184 6796 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7185 6797 {
7186 6798 Mpi2EventDataIrConfigChangeList_t *irChangeList;
7187 6799 mptsas_topo_change_list_t *topo_head = NULL;
7188 6800 mptsas_topo_change_list_t *topo_tail = NULL;
7189 6801 mptsas_topo_change_list_t *topo_node = NULL;
7190 6802 mptsas_target_t *ptgt;
7191 6803 mptsas_hash_table_t *tgttbl;
7192 6804 uint8_t num_entries, i, reason;
7193 6805 uint16_t volhandle, diskhandle;
7194 6806
7195 6807 irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7196 6808 eventreply->EventData;
7197 6809 num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7198 6810 &irChangeList->NumElements);
7199 6811
7200 6812 tgttbl = &mpt->m_active->m_tgttbl;
7201 6813
7202 6814 NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7203 6815 mpt->m_instance));
7204 6816
7205 6817 for (i = 0; i < num_entries; i++) {
7206 6818 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7207 6819 &irChangeList->ConfigElement[i].ReasonCode);
7208 6820 volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7209 6821 &irChangeList->ConfigElement[i].VolDevHandle);
7210 6822 diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7211 6823 &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7212 6824
7213 6825 switch (reason) {
7214 6826 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7215 6827 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7216 6828 {
7217 6829 NDBG20(("mptsas %d volume added\n",
7218 6830 mpt->m_instance));
7219 6831
7220 6832 topo_node = kmem_zalloc(
7221 6833 sizeof (mptsas_topo_change_list_t),
7222 6834 KM_SLEEP);
7223 6835
7224 6836 topo_node->mpt = mpt;
7225 6837 topo_node->event =
7226 6838 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7227 6839 topo_node->un.physport = 0xff;
7228 6840 topo_node->devhdl = volhandle;
7229 6841 topo_node->flags =
7230 6842 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7231 6843 topo_node->object = NULL;
7232 6844 if (topo_head == NULL) {
7233 6845 topo_head = topo_tail = topo_node;
7234 6846 } else {
7235 6847 topo_tail->next = topo_node;
7236 6848 topo_tail = topo_node;
7237 6849 }
7238 6850 break;
7239 6851 }
7240 6852 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7241 6853 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7242 6854 {
7243 6855 NDBG20(("mptsas %d volume deleted\n",
7244 6856 mpt->m_instance));
7245 6857 ptgt = mptsas_search_by_devhdl(tgttbl,
7246 6858 volhandle);
7247 6859 if (ptgt == NULL)
↓ open down ↓ |
212 lines elided |
↑ open up ↑ |
7248 6860 break;
7249 6861
7250 6862 /*
7251 6863 * Clear any flags related to volume
7252 6864 */
7253 6865 (void) mptsas_delete_volume(mpt, volhandle);
7254 6866
7255 6867 /*
7256 6868 * Update DR flag immediately avoid I/O failure
7257 6869 */
7258 - mutex_enter(&ptgt->m_tgt_intr_mutex);
6870 + mutex_enter(&mpt->m_tx_waitq_mutex);
7259 6871 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7260 - mutex_exit(&ptgt->m_tgt_intr_mutex);
6872 + mutex_exit(&mpt->m_tx_waitq_mutex);
7261 6873
7262 6874 topo_node = kmem_zalloc(
7263 6875 sizeof (mptsas_topo_change_list_t),
7264 6876 KM_SLEEP);
7265 6877 topo_node->mpt = mpt;
7266 6878 topo_node->un.phymask = ptgt->m_phymask;
7267 6879 topo_node->event =
7268 6880 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7269 6881 topo_node->devhdl = volhandle;
7270 6882 topo_node->flags =
7271 6883 MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7272 6884 topo_node->object = (void *)ptgt;
7273 6885 if (topo_head == NULL) {
7274 6886 topo_head = topo_tail = topo_node;
7275 6887 } else {
7276 6888 topo_tail->next = topo_node;
7277 6889 topo_tail = topo_node;
7278 6890 }
7279 6891 break;
7280 6892 }
7281 6893 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
7282 6894 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7283 6895 {
7284 6896 ptgt = mptsas_search_by_devhdl(tgttbl,
7285 6897 diskhandle);
7286 6898 if (ptgt == NULL)
7287 6899 break;
7288 6900
7289 6901 /*
7290 6902 * Update DR flag immediately avoid I/O failure
7291 6903 */
7292 - mutex_enter(&ptgt->m_tgt_intr_mutex);
6904 + mutex_enter(&mpt->m_tx_waitq_mutex);
7293 6905 ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7294 - mutex_exit(&ptgt->m_tgt_intr_mutex);
6906 + mutex_exit(&mpt->m_tx_waitq_mutex);
7295 6907
7296 6908 topo_node = kmem_zalloc(
7297 6909 sizeof (mptsas_topo_change_list_t),
7298 6910 KM_SLEEP);
7299 6911 topo_node->mpt = mpt;
7300 6912 topo_node->un.phymask = ptgt->m_phymask;
7301 6913 topo_node->event =
7302 6914 MPTSAS_DR_EVENT_OFFLINE_TARGET;
7303 6915 topo_node->devhdl = diskhandle;
7304 6916 topo_node->flags =
7305 6917 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7306 6918 topo_node->object = (void *)ptgt;
7307 6919 if (topo_head == NULL) {
7308 6920 topo_head = topo_tail = topo_node;
7309 6921 } else {
7310 6922 topo_tail->next = topo_node;
7311 6923 topo_tail = topo_node;
7312 6924 }
7313 6925 break;
7314 6926 }
7315 6927 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7316 6928 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7317 6929 {
7318 6930 /*
7319 6931 * The physical drive is released by a IR
7320 6932 				 * volume. But we cannot get the physport
7321 6933 * or phynum from the event data, so we only
7322 6934 * can get the physport/phynum after SAS
7323 6935 * Device Page0 request for the devhdl.
7324 6936 */
7325 6937 topo_node = kmem_zalloc(
7326 6938 sizeof (mptsas_topo_change_list_t),
7327 6939 KM_SLEEP);
7328 6940 topo_node->mpt = mpt;
7329 6941 topo_node->un.phymask = 0;
7330 6942 topo_node->event =
7331 6943 MPTSAS_DR_EVENT_RECONFIG_TARGET;
7332 6944 topo_node->devhdl = diskhandle;
7333 6945 topo_node->flags =
7334 6946 MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7335 6947 topo_node->object = NULL;
7336 6948 mpt->m_port_chng = 1;
7337 6949 if (topo_head == NULL) {
7338 6950 topo_head = topo_tail = topo_node;
7339 6951 } else {
7340 6952 topo_tail->next = topo_node;
7341 6953 topo_tail = topo_node;
7342 6954 }
7343 6955 break;
7344 6956 }
7345 6957 default:
7346 6958 break;
7347 6959 }
7348 6960 }
7349 6961
7350 6962 if (topo_head != NULL) {
7351 6963 /*
7352 6964 * Launch DR taskq to handle topology change
7353 6965 */
7354 6966 if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7355 6967 mptsas_handle_dr, (void *)topo_head,
7356 6968 DDI_NOSLEEP)) != DDI_SUCCESS) {
7357 6969 mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
7358 6970 "for handle SAS DR event failed. \n");
7359 6971 }
7360 6972 }
7361 6973 break;
7362 6974 }
7363 6975 default:
7364 6976 return (DDI_FAILURE);
7365 6977 }
7366 6978
7367 6979 return (DDI_SUCCESS);
7368 6980 }
7369 6981
7370 6982 /*
7371 6983 * handle events from ioc
7372 6984 */
7373 6985 static void
7374 6986 mptsas_handle_event(void *args)
7375 6987 {
7376 6988 m_replyh_arg_t *replyh_arg;
7377 6989 pMpi2EventNotificationReply_t eventreply;
7378 6990 uint32_t event, iocloginfo, rfm;
↓ open down ↓ |
74 lines elided |
↑ open up ↑ |
7379 6991 uint32_t status;
7380 6992 uint8_t port;
7381 6993 mptsas_t *mpt;
7382 6994 uint_t iocstatus;
7383 6995
7384 6996 replyh_arg = (m_replyh_arg_t *)args;
7385 6997 rfm = replyh_arg->rfm;
7386 6998 mpt = replyh_arg->mpt;
7387 6999
7388 7000 mutex_enter(&mpt->m_mutex);
7001 + /*
7002 + * If HBA is being reset, drop incoming event.
7003 + */
7004 + if (mpt->m_in_reset) {
7005 + NDBG20(("dropping event received prior to reset"));
7006 + mutex_exit(&mpt->m_mutex);
7007 + return;
7008 + }
7389 7009
7390 7010 eventreply = (pMpi2EventNotificationReply_t)
7391 7011 (mpt->m_reply_frame + (rfm - mpt->m_reply_frame_dma_addr));
7392 7012 event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7393 7013
7394 7014 if (iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7395 7015 &eventreply->IOCStatus)) {
7396 7016 if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7397 7017 mptsas_log(mpt, CE_WARN,
7398 7018 "!mptsas_handle_event: IOCStatus=0x%x, "
7399 7019 "IOCLogInfo=0x%x", iocstatus,
7400 7020 ddi_get32(mpt->m_acc_reply_frame_hdl,
7401 7021 &eventreply->IOCLogInfo));
7402 7022 } else {
7403 7023 mptsas_log(mpt, CE_WARN,
7404 7024 "mptsas_handle_event: IOCStatus=0x%x, "
7405 7025 "IOCLogInfo=0x%x", iocstatus,
7406 7026 ddi_get32(mpt->m_acc_reply_frame_hdl,
7407 7027 &eventreply->IOCLogInfo));
7408 7028 }
7409 7029 }
7410 7030
7411 7031 /*
7412 7032 * figure out what kind of event we got and handle accordingly
7413 7033 */
7414 7034 switch (event) {
7415 7035 case MPI2_EVENT_LOG_ENTRY_ADDED:
7416 7036 break;
7417 7037 case MPI2_EVENT_LOG_DATA:
7418 7038 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7419 7039 &eventreply->IOCLogInfo);
7420 7040 NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7421 7041 iocloginfo));
7422 7042 break;
7423 7043 case MPI2_EVENT_STATE_CHANGE:
7424 7044 NDBG20(("mptsas%d state change.", mpt->m_instance));
7425 7045 break;
7426 7046 case MPI2_EVENT_HARD_RESET_RECEIVED:
7427 7047 NDBG20(("mptsas%d event change.", mpt->m_instance));
7428 7048 break;
7429 7049 case MPI2_EVENT_SAS_DISCOVERY:
7430 7050 {
7431 7051 MPI2_EVENT_DATA_SAS_DISCOVERY *sasdiscovery;
7432 7052 char string[80];
7433 7053 uint8_t rc;
7434 7054
7435 7055 sasdiscovery =
7436 7056 (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7437 7057
7438 7058 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7439 7059 &sasdiscovery->ReasonCode);
7440 7060 port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7441 7061 &sasdiscovery->PhysicalPort);
7442 7062 status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7443 7063 &sasdiscovery->DiscoveryStatus);
7444 7064
7445 7065 string[0] = 0;
7446 7066 switch (rc) {
7447 7067 case MPI2_EVENT_SAS_DISC_RC_STARTED:
7448 7068 (void) sprintf(string, "STARTING");
7449 7069 break;
7450 7070 case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7451 7071 (void) sprintf(string, "COMPLETED");
7452 7072 break;
7453 7073 default:
7454 7074 (void) sprintf(string, "UNKNOWN");
7455 7075 break;
7456 7076 }
7457 7077
7458 7078 NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7459 7079 port, status));
7460 7080
7461 7081 break;
7462 7082 }
7463 7083 case MPI2_EVENT_EVENT_CHANGE:
7464 7084 NDBG20(("mptsas%d event change.", mpt->m_instance));
7465 7085 break;
7466 7086 case MPI2_EVENT_TASK_SET_FULL:
7467 7087 {
7468 7088 pMpi2EventDataTaskSetFull_t taskfull;
7469 7089
7470 7090 taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7471 7091
7472 7092 NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7473 7093 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7474 7094 &taskfull->CurrentDepth)));
7475 7095 break;
7476 7096 }
7477 7097 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7478 7098 {
7479 7099 /*
7480 7100 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7481 7101 * in mptsas_handle_event_sync() of interrupt context
7482 7102 */
7483 7103 break;
7484 7104 }
7485 7105 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7486 7106 {
7487 7107 pMpi2EventDataSasEnclDevStatusChange_t encstatus;
7488 7108 uint8_t rc;
7489 7109 char string[80];
7490 7110
7491 7111 encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7492 7112 eventreply->EventData;
7493 7113
7494 7114 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7495 7115 &encstatus->ReasonCode);
7496 7116 switch (rc) {
7497 7117 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7498 7118 (void) sprintf(string, "added");
7499 7119 break;
7500 7120 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7501 7121 (void) sprintf(string, ", not responding");
7502 7122 break;
7503 7123 default:
7504 7124 break;
7505 7125 }
7506 7126 NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure %x%s\n",
7507 7127 mpt->m_instance, ddi_get16(mpt->m_acc_reply_frame_hdl,
7508 7128 &encstatus->EnclosureHandle), string));
7509 7129 break;
7510 7130 }
7511 7131
7512 7132 /*
7513 7133 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
7514 7134 * mptsas_handle_event_sync,in here just send ack message.
7515 7135 */
7516 7136 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
7517 7137 {
7518 7138 pMpi2EventDataSasDeviceStatusChange_t statuschange;
7519 7139 uint8_t rc;
7520 7140 uint16_t devhdl;
7521 7141 uint64_t wwn = 0;
7522 7142 uint32_t wwn_lo, wwn_hi;
7523 7143
7524 7144 statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
7525 7145 eventreply->EventData;
7526 7146 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7527 7147 &statuschange->ReasonCode);
7528 7148 wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7529 7149 (uint32_t *)(void *)&statuschange->SASAddress);
7530 7150 wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
7531 7151 (uint32_t *)(void *)&statuschange->SASAddress + 1);
7532 7152 wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
7533 7153 devhdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7534 7154 &statuschange->DevHandle);
7535 7155
7536 7156 NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
7537 7157 wwn));
7538 7158
7539 7159 switch (rc) {
7540 7160 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7541 7161 NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
7542 7162 ddi_get8(mpt->m_acc_reply_frame_hdl,
7543 7163 &statuschange->ASC),
7544 7164 ddi_get8(mpt->m_acc_reply_frame_hdl,
7545 7165 &statuschange->ASCQ)));
7546 7166 break;
7547 7167
7548 7168 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7549 7169 NDBG20(("Device not supported"));
7550 7170 break;
7551 7171
7552 7172 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7553 7173 NDBG20(("IOC internally generated the Target Reset "
7554 7174 "for devhdl:%x", devhdl));
7555 7175 break;
7556 7176
7557 7177 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7558 7178 NDBG20(("IOC's internally generated Target Reset "
7559 7179 "completed for devhdl:%x", devhdl));
7560 7180 break;
7561 7181
7562 7182 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7563 7183 NDBG20(("IOC internally generated Abort Task"));
7564 7184 break;
7565 7185
7566 7186 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7567 7187 NDBG20(("IOC's internally generated Abort Task "
7568 7188 "completed"));
7569 7189 break;
7570 7190
7571 7191 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7572 7192 NDBG20(("IOC internally generated Abort Task Set"));
7573 7193 break;
7574 7194
7575 7195 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7576 7196 NDBG20(("IOC internally generated Clear Task Set"));
7577 7197 break;
7578 7198
7579 7199 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7580 7200 NDBG20(("IOC internally generated Query Task"));
7581 7201 break;
7582 7202
7583 7203 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7584 7204 NDBG20(("Device sent an Asynchronous Notification"));
7585 7205 break;
7586 7206
7587 7207 default:
7588 7208 break;
7589 7209 }
7590 7210 break;
7591 7211 }
7592 7212 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7593 7213 {
7594 7214 /*
7595 7215 * IR TOPOLOGY CHANGE LIST Event has already been handled
7596 7216 * in mpt_handle_event_sync() of interrupt context
7597 7217 */
7598 7218 break;
7599 7219 }
7600 7220 case MPI2_EVENT_IR_OPERATION_STATUS:
7601 7221 {
7602 7222 Mpi2EventDataIrOperationStatus_t *irOpStatus;
7603 7223 char reason_str[80];
7604 7224 uint8_t rc, percent;
7605 7225 uint16_t handle;
7606 7226
7607 7227 irOpStatus = (pMpi2EventDataIrOperationStatus_t)
7608 7228 eventreply->EventData;
7609 7229 rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7610 7230 &irOpStatus->RAIDOperation);
7611 7231 percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
7612 7232 &irOpStatus->PercentComplete);
7613 7233 handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7614 7234 &irOpStatus->VolDevHandle);
7615 7235
7616 7236 switch (rc) {
7617 7237 case MPI2_EVENT_IR_RAIDOP_RESYNC:
7618 7238 (void) sprintf(reason_str, "resync");
7619 7239 break;
7620 7240 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
7621 7241 (void) sprintf(reason_str, "online capacity "
7622 7242 "expansion");
7623 7243 break;
7624 7244 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
7625 7245 (void) sprintf(reason_str, "consistency check");
7626 7246 break;
7627 7247 default:
7628 7248 (void) sprintf(reason_str, "unknown reason %x",
7629 7249 rc);
7630 7250 }
7631 7251
7632 7252 NDBG20(("mptsas%d raid operational status: (%s)"
7633 7253 "\thandle(0x%04x), percent complete(%d)\n",
7634 7254 mpt->m_instance, reason_str, handle, percent));
7635 7255 break;
7636 7256 }
7637 7257 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
7638 7258 {
7639 7259 pMpi2EventDataSasBroadcastPrimitive_t sas_broadcast;
7640 7260 uint8_t phy_num;
7641 7261 uint8_t primitive;
7642 7262
7643 7263 sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
7644 7264 eventreply->EventData;
7645 7265
7646 7266 phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7647 7267 &sas_broadcast->PhyNum);
7648 7268 primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
7649 7269 &sas_broadcast->Primitive);
7650 7270
7651 7271 switch (primitive) {
7652 7272 case MPI2_EVENT_PRIMITIVE_CHANGE:
7653 7273 mptsas_smhba_log_sysevent(mpt,
7654 7274 ESC_SAS_HBA_PORT_BROADCAST,
7655 7275 SAS_PORT_BROADCAST_CHANGE,
7656 7276 &mpt->m_phy_info[phy_num].smhba_info);
7657 7277 break;
7658 7278 case MPI2_EVENT_PRIMITIVE_SES:
7659 7279 mptsas_smhba_log_sysevent(mpt,
7660 7280 ESC_SAS_HBA_PORT_BROADCAST,
7661 7281 SAS_PORT_BROADCAST_SES,
7662 7282 &mpt->m_phy_info[phy_num].smhba_info);
7663 7283 break;
7664 7284 case MPI2_EVENT_PRIMITIVE_EXPANDER:
7665 7285 mptsas_smhba_log_sysevent(mpt,
7666 7286 ESC_SAS_HBA_PORT_BROADCAST,
7667 7287 SAS_PORT_BROADCAST_D01_4,
7668 7288 &mpt->m_phy_info[phy_num].smhba_info);
7669 7289 break;
7670 7290 case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
7671 7291 mptsas_smhba_log_sysevent(mpt,
7672 7292 ESC_SAS_HBA_PORT_BROADCAST,
7673 7293 SAS_PORT_BROADCAST_D04_7,
7674 7294 &mpt->m_phy_info[phy_num].smhba_info);
7675 7295 break;
7676 7296 case MPI2_EVENT_PRIMITIVE_RESERVED3:
7677 7297 mptsas_smhba_log_sysevent(mpt,
7678 7298 ESC_SAS_HBA_PORT_BROADCAST,
7679 7299 SAS_PORT_BROADCAST_D16_7,
7680 7300 &mpt->m_phy_info[phy_num].smhba_info);
7681 7301 break;
7682 7302 case MPI2_EVENT_PRIMITIVE_RESERVED4:
7683 7303 mptsas_smhba_log_sysevent(mpt,
7684 7304 ESC_SAS_HBA_PORT_BROADCAST,
7685 7305 SAS_PORT_BROADCAST_D29_7,
7686 7306 &mpt->m_phy_info[phy_num].smhba_info);
7687 7307 break;
7688 7308 case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
7689 7309 mptsas_smhba_log_sysevent(mpt,
7690 7310 ESC_SAS_HBA_PORT_BROADCAST,
7691 7311 SAS_PORT_BROADCAST_D24_0,
7692 7312 &mpt->m_phy_info[phy_num].smhba_info);
7693 7313 break;
7694 7314 case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
7695 7315 mptsas_smhba_log_sysevent(mpt,
7696 7316 ESC_SAS_HBA_PORT_BROADCAST,
7697 7317 SAS_PORT_BROADCAST_D27_4,
7698 7318 &mpt->m_phy_info[phy_num].smhba_info);
7699 7319 break;
7700 7320 default:
7701 7321 NDBG20(("mptsas%d: unknown BROADCAST PRIMITIVE"
7702 7322 " %x received",
7703 7323 mpt->m_instance, primitive));
7704 7324 break;
7705 7325 }
7706 7326 NDBG20(("mptsas%d sas broadcast primitive: "
7707 7327 "\tprimitive(0x%04x), phy(%d) complete\n",
7708 7328 mpt->m_instance, primitive, phy_num));
7709 7329 break;
7710 7330 }
7711 7331 case MPI2_EVENT_IR_VOLUME:
7712 7332 {
7713 7333 Mpi2EventDataIrVolume_t *irVolume;
7714 7334 uint16_t devhandle;
7715 7335 uint32_t state;
7716 7336 int config, vol;
7717 7337 mptsas_slots_t *slots = mpt->m_active;
7718 7338 uint8_t found = FALSE;
7719 7339
7720 7340 irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
7721 7341 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7722 7342 &irVolume->NewValue);
7723 7343 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7724 7344 &irVolume->VolDevHandle);
7725 7345
7726 7346 NDBG20(("EVENT_IR_VOLUME event is received"));
7727 7347
7728 7348 /*
7729 7349 * Get latest RAID info and then find the DevHandle for this
7730 7350 * event in the configuration. If the DevHandle is not found
7731 7351 * just exit the event.
7732 7352 */
7733 7353 (void) mptsas_get_raid_info(mpt);
7734 7354 for (config = 0; (config < slots->m_num_raid_configs) &&
7735 7355 (!found); config++) {
7736 7356 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
7737 7357 if (slots->m_raidconfig[config].m_raidvol[vol].
7738 7358 m_raidhandle == devhandle) {
7739 7359 found = TRUE;
7740 7360 break;
7741 7361 }
7742 7362 }
7743 7363 }
7744 7364 if (!found) {
7745 7365 break;
7746 7366 }
7747 7367
7748 7368 switch (irVolume->ReasonCode) {
7749 7369 case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
7750 7370 {
7751 7371 uint32_t i;
7752 7372 slots->m_raidconfig[config].m_raidvol[vol].m_settings =
7753 7373 state;
7754 7374
7755 7375 i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
7756 7376 mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
7757 7377 ", auto-config of hot-swap drives is %s"
7758 7378 ", write caching is %s"
7759 7379 ", hot-spare pool mask is %02x\n",
7760 7380 vol, state &
7761 7381 MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
7762 7382 ? "disabled" : "enabled",
7763 7383 i == MPI2_RAIDVOL0_SETTING_UNCHANGED
7764 7384 ? "controlled by member disks" :
7765 7385 i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
7766 7386 ? "disabled" :
7767 7387 i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
7768 7388 ? "enabled" :
7769 7389 "incorrectly set",
7770 7390 (state >> 16) & 0xff);
7771 7391 break;
7772 7392 }
7773 7393 case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
7774 7394 {
7775 7395 slots->m_raidconfig[config].m_raidvol[vol].m_state =
7776 7396 (uint8_t)state;
7777 7397
7778 7398 mptsas_log(mpt, CE_NOTE,
7779 7399 "Volume %d is now %s\n", vol,
7780 7400 state == MPI2_RAID_VOL_STATE_OPTIMAL
7781 7401 ? "optimal" :
7782 7402 state == MPI2_RAID_VOL_STATE_DEGRADED
7783 7403 ? "degraded" :
7784 7404 state == MPI2_RAID_VOL_STATE_ONLINE
7785 7405 ? "online" :
7786 7406 state == MPI2_RAID_VOL_STATE_INITIALIZING
7787 7407 ? "initializing" :
7788 7408 state == MPI2_RAID_VOL_STATE_FAILED
7789 7409 ? "failed" :
7790 7410 state == MPI2_RAID_VOL_STATE_MISSING
7791 7411 ? "missing" :
7792 7412 "state unknown");
7793 7413 break;
7794 7414 }
7795 7415 case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
7796 7416 {
7797 7417 slots->m_raidconfig[config].m_raidvol[vol].
7798 7418 m_statusflags = state;
7799 7419
7800 7420 mptsas_log(mpt, CE_NOTE,
7801 7421 " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
7802 7422 vol,
7803 7423 state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
7804 7424 ? ", enabled" : ", disabled",
7805 7425 state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
7806 7426 ? ", quiesced" : "",
7807 7427 state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
7808 7428 ? ", inactive" : ", active",
7809 7429 state &
7810 7430 MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
7811 7431 ? ", bad block table is full" : "",
7812 7432 state &
7813 7433 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
7814 7434 ? ", resync in progress" : "",
7815 7435 state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
7816 7436 ? ", background initialization in progress" : "",
7817 7437 state &
7818 7438 MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
7819 7439 ? ", capacity expansion in progress" : "",
7820 7440 state &
7821 7441 MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
7822 7442 ? ", consistency check in progress" : "",
7823 7443 state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
7824 7444 ? ", data scrub in progress" : "");
7825 7445 break;
7826 7446 }
7827 7447 default:
7828 7448 break;
7829 7449 }
7830 7450 break;
7831 7451 }
7832 7452 case MPI2_EVENT_IR_PHYSICAL_DISK:
7833 7453 {
7834 7454 Mpi2EventDataIrPhysicalDisk_t *irPhysDisk;
7835 7455 uint16_t devhandle, enchandle, slot;
7836 7456 uint32_t status, state;
7837 7457 uint8_t physdisknum, reason;
7838 7458
7839 7459 irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
7840 7460 eventreply->EventData;
7841 7461 physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
7842 7462 &irPhysDisk->PhysDiskNum);
7843 7463 devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7844 7464 &irPhysDisk->PhysDiskDevHandle);
7845 7465 enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7846 7466 &irPhysDisk->EnclosureHandle);
7847 7467 slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
7848 7468 &irPhysDisk->Slot);
7849 7469 state = ddi_get32(mpt->m_acc_reply_frame_hdl,
7850 7470 &irPhysDisk->NewValue);
7851 7471 reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7852 7472 &irPhysDisk->ReasonCode);
7853 7473
7854 7474 NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
7855 7475
7856 7476 switch (reason) {
7857 7477 case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
7858 7478 mptsas_log(mpt, CE_NOTE,
7859 7479 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7860 7480 "for enclosure with handle 0x%x is now in hot "
7861 7481 "spare pool %d",
7862 7482 physdisknum, devhandle, slot, enchandle,
7863 7483 (state >> 16) & 0xff);
7864 7484 break;
7865 7485
7866 7486 case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
7867 7487 status = state;
7868 7488 mptsas_log(mpt, CE_NOTE,
7869 7489 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7870 7490 "for enclosure with handle 0x%x is now "
7871 7491 "%s%s%s%s%s\n", physdisknum, devhandle, slot,
7872 7492 enchandle,
7873 7493 status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
7874 7494 ? ", inactive" : ", active",
7875 7495 status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
7876 7496 ? ", out of sync" : "",
7877 7497 status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
7878 7498 ? ", quiesced" : "",
7879 7499 status &
7880 7500 MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
7881 7501 ? ", write cache enabled" : "",
7882 7502 status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
7883 7503 ? ", capacity expansion target" : "");
7884 7504 break;
7885 7505
7886 7506 case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
7887 7507 mptsas_log(mpt, CE_NOTE,
7888 7508 " PhysDiskNum %d with DevHandle 0x%x in slot %d "
7889 7509 "for enclosure with handle 0x%x is now %s\n",
7890 7510 physdisknum, devhandle, slot, enchandle,
7891 7511 state == MPI2_RAID_PD_STATE_OPTIMAL
7892 7512 ? "optimal" :
7893 7513 state == MPI2_RAID_PD_STATE_REBUILDING
7894 7514 ? "rebuilding" :
7895 7515 state == MPI2_RAID_PD_STATE_DEGRADED
7896 7516 ? "degraded" :
7897 7517 state == MPI2_RAID_PD_STATE_HOT_SPARE
7898 7518 ? "a hot spare" :
7899 7519 state == MPI2_RAID_PD_STATE_ONLINE
7900 7520 ? "online" :
7901 7521 state == MPI2_RAID_PD_STATE_OFFLINE
7902 7522 ? "offline" :
7903 7523 state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
7904 7524 ? "not compatible" :
7905 7525 state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
7906 7526 ? "not configured" :
7907 7527 "state unknown");
7908 7528 break;
7909 7529 }
7910 7530 break;
7911 7531 }
7912 7532 default:
7913 7533 NDBG20(("mptsas%d: unknown event %x received",
7914 7534 mpt->m_instance, event));
7915 7535 break;
7916 7536 }
7917 7537
7918 7538 /*
7919 7539 * Return the reply frame to the free queue.
7920 7540 */
7921 7541 ddi_put32(mpt->m_acc_free_queue_hdl,
7922 7542 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
7923 7543 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
7924 7544 DDI_DMA_SYNC_FORDEV);
7925 7545 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
7926 7546 mpt->m_free_index = 0;
7927 7547 }
7928 7548 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
7929 7549 mpt->m_free_index);
7930 7550 mutex_exit(&mpt->m_mutex);
7931 7551 }
7932 7552
7933 7553 /*
7934 7554 * invoked from timeout() to restart qfull cmds with throttle == 0
7935 7555 */
7936 7556 static void
7937 7557 mptsas_restart_cmd(void *arg)
7938 7558 {
↓ open down ↓ |
540 lines elided |
↑ open up ↑ |
7939 7559 mptsas_t *mpt = arg;
7940 7560 mptsas_target_t *ptgt = NULL;
7941 7561
7942 7562 mutex_enter(&mpt->m_mutex);
7943 7563
7944 7564 mpt->m_restart_cmd_timeid = 0;
7945 7565
7946 7566 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
7947 7567 MPTSAS_HASH_FIRST);
7948 7568 while (ptgt != NULL) {
7949 - mutex_enter(&ptgt->m_tgt_intr_mutex);
7950 7569 if (ptgt->m_reset_delay == 0) {
7951 7570 if (ptgt->m_t_throttle == QFULL_THROTTLE) {
7952 7571 mptsas_set_throttle(mpt, ptgt,
7953 7572 MAX_THROTTLE);
7954 7573 }
7955 7574 }
7956 - mutex_exit(&ptgt->m_tgt_intr_mutex);
7957 7575
7958 7576 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
7959 7577 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
7960 7578 }
7961 7579 mptsas_restart_hba(mpt);
7962 7580 mutex_exit(&mpt->m_mutex);
7963 7581 }
7964 7582
7965 -/*
7966 - * mptsas_remove_cmd0 is similar to mptsas_remove_cmd except that it is called
7967 - * where m_intr_mutex has already been held.
7968 - */
7969 7583 void
7970 7584 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
7971 7585 {
7972 - ASSERT(mutex_owned(&mpt->m_mutex));
7973 -
7974 - /*
7975 - * With new fine-grained lock mechanism, the outstanding cmd is only
7976 - * linked to m_active before the dma is triggerred(MPTSAS_START_CMD)
7977 - * to send it. that is, mptsas_save_cmd() doesn't link the outstanding
7978 - * cmd now. So when mptsas_remove_cmd is called, a mptsas_save_cmd must
7979 - * have been called, but the cmd may have not been linked.
7980 - * For mptsas_remove_cmd0, the cmd must have been linked.
7981 - * In order to keep the same semantic, we link the cmd to the
7982 - * outstanding cmd list.
7983 - */
7984 - mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
7985 -
7986 - mutex_enter(&mpt->m_intr_mutex);
7987 - mptsas_remove_cmd0(mpt, cmd);
7988 - mutex_exit(&mpt->m_intr_mutex);
7989 -}
7990 -
7991 -static inline void
7992 -mptsas_remove_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd)
7993 -{
7994 7586 int slot;
7995 7587 mptsas_slots_t *slots = mpt->m_active;
7996 7588 int t;
7997 7589 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
7998 - mptsas_slot_free_e_t *pe;
7999 7590
8000 7591 ASSERT(cmd != NULL);
8001 7592 ASSERT(cmd->cmd_queued == FALSE);
8002 7593
8003 7594 /*
8004 7595 * Task Management cmds are removed in their own routines. Also,
8005 7596 * we don't want to modify timeout based on TM cmds.
8006 7597 */
8007 7598 if (cmd->cmd_flags & CFLAG_TM_CMD) {
8008 7599 return;
8009 7600 }
8010 7601
8011 7602 t = Tgt(cmd);
8012 7603 slot = cmd->cmd_slot;
8013 - pe = mpt->m_slot_free_ae + slot - 1;
8014 - ASSERT(cmd == slots->m_slot[slot]);
8015 - ASSERT((slot > 0) && slot < (mpt->m_max_requests - 1));
8016 7604
8017 7605 /*
8018 7606 * remove the cmd.
8019 7607 */
8020 - mutex_enter(&mpt->m_slot_freeq_pairp[pe->cpuid].
8021 - m_slot_releq.s.m_fq_mutex);
8022 - NDBG31(("mptsas_remove_cmd0: removing cmd=0x%p", (void *)cmd));
8023 - slots->m_slot[slot] = NULL;
8024 - ASSERT(pe->slot == slot);
8025 - list_insert_tail(&mpt->m_slot_freeq_pairp[pe->cpuid].
8026 - m_slot_releq.s.m_fq_list, pe);
8027 - mpt->m_slot_freeq_pairp[pe->cpuid].m_slot_releq.s.m_fq_n++;
8028 - ASSERT(mpt->m_slot_freeq_pairp[pe->cpuid].
8029 - m_slot_releq.s.m_fq_n <= mpt->m_max_requests - 2);
8030 - mutex_exit(&mpt->m_slot_freeq_pairp[pe->cpuid].
8031 - m_slot_releq.s.m_fq_mutex);
7608 + if (cmd == slots->m_slot[slot]) {
7609 + NDBG31(("mptsas_remove_cmd: removing cmd=0x%p", (void *)cmd));
7610 + slots->m_slot[slot] = NULL;
7611 + mpt->m_ncmds--;
8032 7612
8033 - /*
8034 - * only decrement per target ncmds if command
8035 - * has a target associated with it.
8036 - */
8037 - if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
8038 - mutex_enter(&ptgt->m_tgt_intr_mutex);
8039 - ptgt->m_t_ncmds--;
8040 7613 /*
8041 - * reset throttle if we just ran an untagged command
8042 - * to a tagged target
7614 + * only decrement per target ncmds if command
7615 + * has a target associated with it.
8043 7616 */
8044 - if ((ptgt->m_t_ncmds == 0) &&
8045 - ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
8046 - mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7617 + if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
7618 + ptgt->m_t_ncmds--;
7619 + /*
7620 + * reset throttle if we just ran an untagged command
7621 + * to a tagged target
7622 + */
7623 + if ((ptgt->m_t_ncmds == 0) &&
7624 + ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
7625 + mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7626 + }
8047 7627 }
8048 - mutex_exit(&ptgt->m_tgt_intr_mutex);
7628 +
8049 7629 }
8050 7630
8051 7631 /*
8052 7632 * This is all we need to do for ioc commands.
8053 - * The ioc cmds would never be handled in fastpath in ISR, so we make
8054 - * sure the mptsas_return_to_pool() would always be called with
8055 - * m_mutex protected.
8056 7633 */
8057 7634 if (cmd->cmd_flags & CFLAG_CMDIOC) {
8058 - ASSERT(mutex_owned(&mpt->m_mutex));
8059 7635 mptsas_return_to_pool(mpt, cmd);
8060 7636 return;
8061 7637 }
8062 7638
8063 7639 /*
8064 7640 * Figure out what to set tag Q timeout for...
8065 7641 *
8066 7642 * Optimize: If we have duplicate's of same timeout
8067 7643 * we're using, then we'll use it again until we run
8068 7644 * out of duplicates. This should be the normal case
8069 7645 * for block and raw I/O.
8070 7646 * If no duplicates, we have to scan through tag que and
8071 7647 * find the longest timeout value and use it. This is
8072 7648 * going to take a while...
8073 7649 * Add 1 to m_n_slots to account for TM request.
8074 7650 */
8075 - mutex_enter(&ptgt->m_tgt_intr_mutex);
8076 7651 if (cmd->cmd_pkt->pkt_time == ptgt->m_timebase) {
8077 7652 if (--(ptgt->m_dups) == 0) {
8078 7653 if (ptgt->m_t_ncmds) {
8079 7654 mptsas_cmd_t *ssp;
8080 7655 uint_t n = 0;
8081 7656 ushort_t nslots = (slots->m_n_slots + 1);
8082 7657 ushort_t i;
8083 7658 /*
8084 7659 * This crude check assumes we don't do
8085 7660 * this too often which seems reasonable
8086 7661 * for block and raw I/O.
8087 7662 */
8088 7663 for (i = 0; i < nslots; i++) {
8089 7664 ssp = slots->m_slot[i];
8090 7665 if (ssp && (Tgt(ssp) == t) &&
8091 7666 (ssp->cmd_pkt->pkt_time > n)) {
8092 7667 n = ssp->cmd_pkt->pkt_time;
8093 7668 ptgt->m_dups = 1;
8094 7669 } else if (ssp && (Tgt(ssp) == t) &&
8095 7670 (ssp->cmd_pkt->pkt_time == n)) {
8096 7671 ptgt->m_dups++;
8097 7672 }
8098 7673 }
↓ open down ↓ |
13 lines elided |
↑ open up ↑ |
8099 7674 ptgt->m_timebase = n;
8100 7675 } else {
8101 7676 ptgt->m_dups = 0;
8102 7677 ptgt->m_timebase = 0;
8103 7678 }
8104 7679 }
8105 7680 }
8106 7681 ptgt->m_timeout = ptgt->m_timebase;
8107 7682
8108 7683 ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
8109 - mutex_exit(&ptgt->m_tgt_intr_mutex);
8110 7684 }
8111 7685
8112 7686 /*
7687 + * accept all cmds on the tx_waitq if any and then
8113 7688 * start a fresh request from the top of the device queue.
7689 + *
7690 + * since cmds are always queued on the tx_waitq, and only rarely on
7691 + * the instance waitq, this function should not be invoked in the ISR;
7692 + * mptsas_restart_waitq() is invoked in the ISR instead. otherwise, the
7693 + * burden belonging to the IO dispatch CPUs would be moved to the interrupt CPU.
8114 7694 */
8115 7695 static void
8116 7696 mptsas_restart_hba(mptsas_t *mpt)
8117 7697 {
7698 + ASSERT(mutex_owned(&mpt->m_mutex));
7699 +
7700 + mutex_enter(&mpt->m_tx_waitq_mutex);
7701 + if (mpt->m_tx_waitq) {
7702 + mptsas_accept_tx_waitq(mpt);
7703 + }
7704 + mutex_exit(&mpt->m_tx_waitq_mutex);
7705 + mptsas_restart_waitq(mpt);
7706 +}
7707 +
7708 +/*
7709 + * start a fresh request from the top of the device queue
7710 + */
7711 +static void
7712 +mptsas_restart_waitq(mptsas_t *mpt)
7713 +{
8118 7714 mptsas_cmd_t *cmd, *next_cmd;
8119 7715 mptsas_target_t *ptgt = NULL;
8120 7716
8121 - NDBG1(("mptsas_restart_hba: mpt=0x%p", (void *)mpt));
7717 + NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
8122 7718
8123 7719 ASSERT(mutex_owned(&mpt->m_mutex));
8124 7720
8125 7721 /*
8126 7722 * If there is a reset delay, don't start any cmds. Otherwise, start
8127 7723 * as many cmds as possible.
8128 7724 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
8129 7725 * commands is m_max_requests - 2.
8130 7726 */
8131 7727 cmd = mpt->m_waitq;
8132 7728
8133 7729 while (cmd != NULL) {
8134 7730 next_cmd = cmd->cmd_linkp;
8135 7731 if (cmd->cmd_flags & CFLAG_PASSTHRU) {
8136 7732 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8137 7733 /*
8138 7734 * passthru command get slot need
8139 7735 * set CFLAG_PREPARED.
8140 7736 */
8141 7737 cmd->cmd_flags |= CFLAG_PREPARED;
8142 7738 mptsas_waitq_delete(mpt, cmd);
8143 7739 mptsas_start_passthru(mpt, cmd);
8144 7740 }
8145 7741 cmd = next_cmd;
8146 7742 continue;
8147 7743 }
8148 7744 if (cmd->cmd_flags & CFLAG_CONFIG) {
8149 7745 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8150 7746 /*
8151 7747 * Send the config page request and delete it
8152 7748 * from the waitq.
8153 7749 */
8154 7750 cmd->cmd_flags |= CFLAG_PREPARED;
8155 7751 mptsas_waitq_delete(mpt, cmd);
8156 7752 mptsas_start_config_page_access(mpt, cmd);
8157 7753 }
8158 7754 cmd = next_cmd;
8159 7755 continue;
8160 7756 }
8161 7757 if (cmd->cmd_flags & CFLAG_FW_DIAG) {
8162 7758 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8163 7759 /*
8164 7760 * Send the FW Diag request and delete it from
8165 7761 * the waitq.
↓ open down ↓ |
34 lines elided |
↑ open up ↑ |
8166 7762 */
8167 7763 cmd->cmd_flags |= CFLAG_PREPARED;
8168 7764 mptsas_waitq_delete(mpt, cmd);
8169 7765 mptsas_start_diag(mpt, cmd);
8170 7766 }
8171 7767 cmd = next_cmd;
8172 7768 continue;
8173 7769 }
8174 7770
8175 7771 ptgt = cmd->cmd_tgt_addr;
8176 - if (ptgt) {
8177 - mutex_enter(&mpt->m_intr_mutex);
8178 - mutex_enter(&ptgt->m_tgt_intr_mutex);
8179 - if ((ptgt->m_t_throttle == DRAIN_THROTTLE) &&
8180 - (ptgt->m_t_ncmds == 0)) {
8181 - mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7772 + if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
7773 + (ptgt->m_t_ncmds == 0)) {
7774 + mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
7775 + }
7776 + if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
7777 + (ptgt && (ptgt->m_reset_delay == 0)) &&
7778 + (ptgt && (ptgt->m_t_ncmds <
7779 + ptgt->m_t_throttle))) {
7780 + if (mptsas_save_cmd(mpt, cmd) == TRUE) {
7781 + mptsas_waitq_delete(mpt, cmd);
7782 + (void) mptsas_start_cmd(mpt, cmd);
8182 7783 }
8183 - if ((ptgt->m_reset_delay == 0) &&
8184 - (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
8185 - mutex_exit(&ptgt->m_tgt_intr_mutex);
8186 - mutex_exit(&mpt->m_intr_mutex);
8187 - if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8188 - mptsas_waitq_delete(mpt, cmd);
8189 - (void) mptsas_start_cmd(mpt, cmd);
8190 - }
8191 - goto out;
8192 - }
8193 - mutex_exit(&ptgt->m_tgt_intr_mutex);
8194 - mutex_exit(&mpt->m_intr_mutex);
8195 7784 }
8196 -out:
8197 7785 cmd = next_cmd;
8198 7786 }
8199 7787 }
8200 -
8201 7788 /*
8202 - * mpt tag type lookup
7789 + * Cmds are queued if tran_start() doesn't get the m_mutex lock (no wait).
7790 + * Accept all those queued cmds before a new cmd is accepted so that the
7791 + * cmds are sent in order.
8203 7792 */
8204 -static char mptsas_tag_lookup[] =
8205 - {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8206 -
8207 -/*
8208 - * mptsas_start_cmd0 is similar to mptsas_start_cmd, except that, it is called
8209 - * without ANY mutex protected, while, mptsas_start_cmd is called with m_mutex
8210 - * protected.
8211 - *
8212 - * the relevant field in ptgt should be protected by m_tgt_intr_mutex in both
8213 - * functions.
8214 - *
8215 - * before the cmds are linked on the slot for monitor as outstanding cmds, they
8216 - * are accessed as slab objects, so slab framework ensures the exclusive access,
8217 - * and no other mutex is requireed. Linking for monitor and the trigger of dma
8218 - * must be done exclusively.
8219 - */
8220 -static int
8221 -mptsas_start_cmd0(mptsas_t *mpt, mptsas_cmd_t *cmd)
7793 +static void
7794 +mptsas_accept_tx_waitq(mptsas_t *mpt)
8222 7795 {
8223 - struct scsi_pkt *pkt = CMD2PKT(cmd);
8224 - uint32_t control = 0;
8225 - int n;
8226 - caddr_t mem;
8227 - pMpi2SCSIIORequest_t io_request;
8228 - ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
8229 - ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
8230 - mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8231 - uint16_t SMID, io_flags = 0;
8232 - uint32_t request_desc_low, request_desc_high;
7796 + mptsas_cmd_t *cmd;
8233 7797
8234 - NDBG1(("mptsas_start_cmd0: cmd=0x%p", (void *)cmd));
7798 + ASSERT(mutex_owned(&mpt->m_mutex));
7799 + ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
8235 7800
8236 7801 /*
8237 - * Set SMID and increment index. Rollover to 1 instead of 0 if index
8238 - * is at the max. 0 is an invalid SMID, so we call the first index 1.
7802 + * A Bus Reset could occur at any time and flush the tx_waitq,
7803 + * so we cannot count on the tx_waitq to contain even one cmd.
7804 + * And when the m_tx_waitq_mutex is released and run
7805 + * mptsas_accept_pkt(), the tx_waitq may be flushed.
8239 7806 */
8240 - SMID = cmd->cmd_slot;
8241 -
8242 - /*
8243 - * It is possible for back to back device reset to
8244 - * happen before the reset delay has expired. That's
8245 - * ok, just let the device reset go out on the bus.
8246 - */
8247 - if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8248 - ASSERT(ptgt->m_reset_delay == 0);
8249 - }
8250 -
8251 - /*
8252 - * if a non-tagged cmd is submitted to an active tagged target
8253 - * then drain before submitting this cmd; SCSI-2 allows RQSENSE
8254 - * to be untagged
8255 - */
8256 - mutex_enter(&ptgt->m_tgt_intr_mutex);
8257 - if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
8258 - (ptgt->m_t_ncmds > 1) &&
8259 - ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
8260 - (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
8261 - if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8262 - NDBG23(("target=%d, untagged cmd, start draining\n",
8263 - ptgt->m_devhdl));
8264 -
8265 - if (ptgt->m_reset_delay == 0) {
8266 - mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
8267 - }
8268 - mutex_exit(&ptgt->m_tgt_intr_mutex);
8269 -
8270 - mutex_enter(&mpt->m_mutex);
8271 - mptsas_remove_cmd(mpt, cmd);
8272 - cmd->cmd_pkt_flags |= FLAG_HEAD;
8273 - mptsas_waitq_add(mpt, cmd);
8274 - mutex_exit(&mpt->m_mutex);
8275 - return (DDI_FAILURE);
8276 - }
8277 - mutex_exit(&ptgt->m_tgt_intr_mutex);
8278 - return (DDI_FAILURE);
8279 - }
8280 - mutex_exit(&ptgt->m_tgt_intr_mutex);
8281 -
8282 - /*
8283 - * Set correct tag bits.
8284 - */
8285 - if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
8286 - switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
8287 - FLAG_TAGMASK) >> 12)]) {
8288 - case MSG_SIMPLE_QTAG:
8289 - control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
7807 + cmd = mpt->m_tx_waitq;
7808 + for (;;) {
7809 + if ((cmd = mpt->m_tx_waitq) == NULL) {
7810 + mpt->m_tx_draining = 0;
8290 7811 break;
8291 - case MSG_HEAD_QTAG:
8292 - control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
8293 - break;
8294 - case MSG_ORDERED_QTAG:
8295 - control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
8296 - break;
8297 - default:
8298 - mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
8299 - break;
8300 7812 }
8301 - } else {
8302 - if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
8303 - ptgt->m_t_throttle = 1;
7813 + if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
7814 + mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8304 7815 }
8305 - control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
7816 + cmd->cmd_linkp = NULL;
7817 + mutex_exit(&mpt->m_tx_waitq_mutex);
7818 + if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
7819 + cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
7820 + "to accept cmd on queue\n");
7821 + mutex_enter(&mpt->m_tx_waitq_mutex);
8306 7822 }
7823 +}
8307 7824
8308 - if (cmd->cmd_pkt_flags & FLAG_TLR) {
8309 - control |= MPI2_SCSIIO_CONTROL_TLR_ON;
8310 - }
8311 7825
8312 - mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
8313 - io_request = (pMpi2SCSIIORequest_t)mem;
7826 +/*
7827 + * mpt tag type lookup
7828 + */
7829 +static char mptsas_tag_lookup[] =
7830 + {0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8314 7831
8315 - bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
8316 - ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
8317 - (MPI2_SCSI_IO_REQUEST, SGL) / 4);
8318 - mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
8319 - MPI2_FUNCTION_SCSI_IO_REQUEST);
8320 -
8321 - (void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
8322 - io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);
8323 -
8324 - io_flags = cmd->cmd_cdblen;
8325 - ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
8326 - /*
8327 - * setup the Scatter/Gather DMA list for this request
8328 - */
8329 - if (cmd->cmd_cookiec > 0) {
8330 - mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
8331 - } else {
8332 - ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
8333 - ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
8334 - MPI2_SGE_FLAGS_END_OF_BUFFER |
8335 - MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
8336 - MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
8337 - }
8338 -
8339 - /*
8340 - * save ARQ information
8341 - */
8342 - ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
8343 - if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
8344 - (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
8345 - ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8346 - cmd->cmd_ext_arqcookie.dmac_address);
8347 - } else {
8348 - ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8349 - cmd->cmd_arqcookie.dmac_address);
8350 - }
8351 -
8352 - ddi_put32(acc_hdl, &io_request->Control, control);
8353 -
8354 - NDBG31(("starting message=0x%p, with cmd=0x%p",
8355 - (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));
8356 -
8357 - (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8358 -
8359 - /*
8360 - * Build request descriptor and write it to the request desc post reg.
8361 - */
8362 - request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
8363 - request_desc_high = ptgt->m_devhdl << 16;
8364 -
8365 - mutex_enter(&mpt->m_mutex);
8366 - mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
8367 - MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
8368 - mutex_exit(&mpt->m_mutex);
8369 -
8370 - /*
8371 - * Start timeout.
8372 - */
8373 - mutex_enter(&ptgt->m_tgt_intr_mutex);
8374 -#ifdef MPTSAS_TEST
8375 - /*
8376 - * Temporarily set timebase = 0; needed for
8377 - * timeout torture test.
8378 - */
8379 - if (mptsas_test_timeouts) {
8380 - ptgt->m_timebase = 0;
8381 - }
8382 -#endif
8383 - n = pkt->pkt_time - ptgt->m_timebase;
8384 -
8385 - if (n == 0) {
8386 - (ptgt->m_dups)++;
8387 - ptgt->m_timeout = ptgt->m_timebase;
8388 - } else if (n > 0) {
8389 - ptgt->m_timeout =
8390 - ptgt->m_timebase = pkt->pkt_time;
8391 - ptgt->m_dups = 1;
8392 - } else if (n < 0) {
8393 - ptgt->m_timeout = ptgt->m_timebase;
8394 - }
8395 -#ifdef MPTSAS_TEST
8396 - /*
8397 - * Set back to a number higher than
8398 - * mptsas_scsi_watchdog_tick
8399 - * so timeouts will happen in mptsas_watchsubr
8400 - */
8401 - if (mptsas_test_timeouts) {
8402 - ptgt->m_timebase = 60;
8403 - }
8404 -#endif
8405 - mutex_exit(&ptgt->m_tgt_intr_mutex);
8406 -
8407 - if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8408 - (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8409 - ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8410 - return (DDI_FAILURE);
8411 - }
8412 - return (DDI_SUCCESS);
8413 -}
8414 -
8415 7832 static int
8416 7833 mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8417 7834 {
8418 7835 struct scsi_pkt *pkt = CMD2PKT(cmd);
8419 7836 uint32_t control = 0;
8420 7837 int n;
8421 7838 caddr_t mem;
8422 7839 pMpi2SCSIIORequest_t io_request;
8423 7840 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
8424 7841 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
8425 7842 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8426 7843 uint16_t SMID, io_flags = 0;
8427 7844 uint32_t request_desc_low, request_desc_high;
8428 7845
8429 7846 NDBG1(("mptsas_start_cmd: cmd=0x%p", (void *)cmd));
8430 7847
8431 7848 /*
8432 7849 * Set SMID and increment index. Rollover to 1 instead of 0 if index
8433 7850 * is at the max. 0 is an invalid SMID, so we call the first index 1.
8434 7851 */
8435 7852 SMID = cmd->cmd_slot;
8436 7853
8437 7854 /*
8438 7855 * It is possible for back to back device reset to
8439 7856 * happen before the reset delay has expired. That's
8440 7857 * ok, just let the device reset go out on the bus.
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
8441 7858 */
8442 7859 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8443 7860 ASSERT(ptgt->m_reset_delay == 0);
8444 7861 }
8445 7862
8446 7863 /*
8447 7864 * if a non-tagged cmd is submitted to an active tagged target
8448 7865 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
8449 7866 * to be untagged
8450 7867 */
8451 - mutex_enter(&ptgt->m_tgt_intr_mutex);
8452 7868 if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
8453 7869 (ptgt->m_t_ncmds > 1) &&
8454 7870 ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
8455 7871 (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
8456 7872 if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8457 7873 NDBG23(("target=%d, untagged cmd, start draining\n",
8458 7874 ptgt->m_devhdl));
8459 7875
8460 7876 if (ptgt->m_reset_delay == 0) {
8461 7877 mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
8462 7878 }
8463 - mutex_exit(&ptgt->m_tgt_intr_mutex);
8464 7879
8465 7880 mptsas_remove_cmd(mpt, cmd);
8466 7881 cmd->cmd_pkt_flags |= FLAG_HEAD;
8467 7882 mptsas_waitq_add(mpt, cmd);
8468 - return (DDI_FAILURE);
8469 7883 }
8470 - mutex_exit(&ptgt->m_tgt_intr_mutex);
8471 7884 return (DDI_FAILURE);
8472 7885 }
8473 - mutex_exit(&ptgt->m_tgt_intr_mutex);
8474 7886
8475 7887 /*
8476 7888 * Set correct tag bits.
8477 7889 */
8478 7890 if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
8479 7891 switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
8480 7892 FLAG_TAGMASK) >> 12)]) {
8481 7893 case MSG_SIMPLE_QTAG:
8482 7894 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8483 7895 break;
8484 7896 case MSG_HEAD_QTAG:
8485 7897 control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
8486 7898 break;
8487 7899 case MSG_ORDERED_QTAG:
8488 7900 control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
8489 7901 break;
8490 7902 default:
8491 7903 mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
8492 7904 break;
8493 7905 }
8494 7906 } else {
8495 7907 if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
8496 7908 ptgt->m_t_throttle = 1;
8497 7909 }
8498 7910 control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8499 7911 }
8500 7912
8501 7913 if (cmd->cmd_pkt_flags & FLAG_TLR) {
8502 7914 control |= MPI2_SCSIIO_CONTROL_TLR_ON;
8503 7915 }
8504 7916
8505 7917 mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
8506 7918 io_request = (pMpi2SCSIIORequest_t)mem;
8507 7919
8508 7920 bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
8509 7921 ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
8510 7922 (MPI2_SCSI_IO_REQUEST, SGL) / 4);
8511 7923 mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
8512 7924 MPI2_FUNCTION_SCSI_IO_REQUEST);
8513 7925
8514 7926 (void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
8515 7927 io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);
8516 7928
8517 7929 io_flags = cmd->cmd_cdblen;
8518 7930 ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
8519 7931 /*
8520 7932 * setup the Scatter/Gather DMA list for this request
8521 7933 */
8522 7934 if (cmd->cmd_cookiec > 0) {
8523 7935 mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
8524 7936 } else {
8525 7937 ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
8526 7938 ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
8527 7939 MPI2_SGE_FLAGS_END_OF_BUFFER |
8528 7940 MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
8529 7941 MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
8530 7942 }
8531 7943
8532 7944 /*
8533 7945 * save ARQ information
8534 7946 */
8535 7947 ddi_put8(acc_hdl, &io_request->SenseBufferLength, cmd->cmd_rqslen);
8536 7948 if ((cmd->cmd_flags & (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) ==
8537 7949 (CFLAG_SCBEXTERN | CFLAG_EXTARQBUFVALID)) {
8538 7950 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8539 7951 cmd->cmd_ext_arqcookie.dmac_address);
8540 7952 } else {
8541 7953 ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress,
8542 7954 cmd->cmd_arqcookie.dmac_address);
8543 7955 }
8544 7956
8545 7957 ddi_put32(acc_hdl, &io_request->Control, control);
8546 7958
↓ open down ↓ |
63 lines elided |
↑ open up ↑ |
8547 7959 NDBG31(("starting message=0x%p, with cmd=0x%p",
8548 7960 (void *)(uintptr_t)mpt->m_req_frame_dma_addr, (void *)cmd));
8549 7961
8550 7962 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8551 7963
8552 7964 /*
8553 7965 * Build request descriptor and write it to the request desc post reg.
8554 7966 */
8555 7967 request_desc_low = (SMID << 16) + MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
8556 7968 request_desc_high = ptgt->m_devhdl << 16;
8557 -
8558 - mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
8559 7969 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
8560 7970
8561 7971 /*
8562 7972 * Start timeout.
8563 7973 */
8564 - mutex_enter(&ptgt->m_tgt_intr_mutex);
8565 7974 #ifdef MPTSAS_TEST
8566 7975 /*
8567 7976 * Temporarily set timebase = 0; needed for
8568 7977 * timeout torture test.
8569 7978 */
8570 7979 if (mptsas_test_timeouts) {
8571 7980 ptgt->m_timebase = 0;
8572 7981 }
8573 7982 #endif
8574 7983 n = pkt->pkt_time - ptgt->m_timebase;
8575 7984
8576 7985 if (n == 0) {
8577 7986 (ptgt->m_dups)++;
8578 7987 ptgt->m_timeout = ptgt->m_timebase;
8579 7988 } else if (n > 0) {
8580 7989 ptgt->m_timeout =
8581 7990 ptgt->m_timebase = pkt->pkt_time;
8582 7991 ptgt->m_dups = 1;
8583 7992 } else if (n < 0) {
8584 7993 ptgt->m_timeout = ptgt->m_timebase;
8585 7994 }
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
8586 7995 #ifdef MPTSAS_TEST
8587 7996 /*
8588 7997 * Set back to a number higher than
8589 7998 * mptsas_scsi_watchdog_tick
8590 7999 * so timeouts will happen in mptsas_watchsubr
8591 8000 */
8592 8001 if (mptsas_test_timeouts) {
8593 8002 ptgt->m_timebase = 60;
8594 8003 }
8595 8004 #endif
8596 - mutex_exit(&ptgt->m_tgt_intr_mutex);
8597 8005
8598 8006 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8599 8007 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8600 8008 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8601 8009 return (DDI_FAILURE);
8602 8010 }
8603 8011 return (DDI_SUCCESS);
8604 8012 }
8605 8013
8606 8014 /*
8607 8015 * Select a helper thread to handle current doneq
8608 8016 */
8609 8017 static void
8610 8018 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8611 8019 {
8612 8020 uint64_t t, i;
8613 8021 uint32_t min = 0xffffffff;
8614 8022 mptsas_doneq_thread_list_t *item;
8615 8023
8616 8024 for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8617 8025 item = &mpt->m_doneq_thread_id[i];
8618 8026 /*
8619 8027 		 * If the number of completed commands on helper thread[i]
8620 8028 		 * is less than doneq_thread_threshold, then pick thread[i].
8621 8029 		 * Otherwise pick the thread with the fewest completed commands.
8622 8030 */
8623 8031
8624 8032 mutex_enter(&item->mutex);
8625 8033 if (item->len < mpt->m_doneq_thread_threshold) {
8626 8034 t = i;
8627 8035 mutex_exit(&item->mutex);
8628 8036 break;
8629 8037 }
8630 8038 if (item->len < min) {
8631 8039 min = item->len;
8632 8040 t = i;
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
8633 8041 }
8634 8042 mutex_exit(&item->mutex);
8635 8043 }
8636 8044 mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8637 8045 mptsas_doneq_mv(mpt, t);
8638 8046 cv_signal(&mpt->m_doneq_thread_id[t].cv);
8639 8047 mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8640 8048 }
8641 8049
8642 8050 /*
8643 - * move the current global doneq to the doneq of thread[t]
8051 + * move the current global doneq to the doneq of thread[t]
8644 8052 */
8645 8053 static void
8646 8054 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8647 8055 {
8648 8056 mptsas_cmd_t *cmd;
8649 8057 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8650 8058
8651 8059 ASSERT(mutex_owned(&item->mutex));
8652 - mutex_enter(&mpt->m_intr_mutex);
8653 8060 while ((cmd = mpt->m_doneq) != NULL) {
8654 8061 if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8655 8062 mpt->m_donetail = &mpt->m_doneq;
8656 8063 }
8657 8064 cmd->cmd_linkp = NULL;
8658 8065 *item->donetail = cmd;
8659 8066 item->donetail = &cmd->cmd_linkp;
8660 8067 mpt->m_doneq_len--;
8661 8068 item->len++;
8662 8069 }
8663 - mutex_exit(&mpt->m_intr_mutex);
8664 8070 }
8665 8071
8666 8072 void
8667 8073 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8668 8074 {
8669 8075 struct scsi_pkt *pkt = CMD2PKT(cmd);
8670 8076
8671 8077 /* Check all acc and dma handles */
8672 8078 if ((mptsas_check_acc_handle(mpt->m_datap) !=
8673 8079 DDI_SUCCESS) ||
8674 8080 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8675 8081 DDI_SUCCESS) ||
8676 8082 (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8677 8083 DDI_SUCCESS) ||
8678 8084 (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8679 8085 DDI_SUCCESS) ||
8680 8086 (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8681 8087 DDI_SUCCESS) ||
8682 8088 (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
8683 8089 DDI_SUCCESS) ||
8684 8090 (mptsas_check_acc_handle(mpt->m_config_handle) !=
8685 8091 DDI_SUCCESS)) {
8686 8092 ddi_fm_service_impact(mpt->m_dip,
8687 8093 DDI_SERVICE_UNAFFECTED);
8688 8094 ddi_fm_acc_err_clear(mpt->m_config_handle,
8689 8095 DDI_FME_VER0);
8690 8096 pkt->pkt_reason = CMD_TRAN_ERR;
8691 8097 pkt->pkt_statistics = 0;
8692 8098 }
8693 8099 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
8694 8100 DDI_SUCCESS) ||
8695 8101 (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
8696 8102 DDI_SUCCESS) ||
8697 8103 (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
8698 8104 DDI_SUCCESS) ||
8699 8105 (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
8700 8106 DDI_SUCCESS) ||
8701 8107 (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
8702 8108 DDI_SUCCESS)) {
8703 8109 ddi_fm_service_impact(mpt->m_dip,
8704 8110 DDI_SERVICE_UNAFFECTED);
8705 8111 pkt->pkt_reason = CMD_TRAN_ERR;
8706 8112 pkt->pkt_statistics = 0;
8707 8113 }
8708 8114 if (cmd->cmd_dmahandle &&
8709 8115 (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
8710 8116 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8711 8117 pkt->pkt_reason = CMD_TRAN_ERR;
8712 8118 pkt->pkt_statistics = 0;
8713 8119 }
8714 8120 if ((cmd->cmd_extra_frames &&
8715 8121 ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
8716 8122 DDI_SUCCESS) ||
8717 8123 (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
8718 8124 DDI_SUCCESS)))) {
8719 8125 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8720 8126 pkt->pkt_reason = CMD_TRAN_ERR;
8721 8127 pkt->pkt_statistics = 0;
8722 8128 }
8723 8129 if (cmd->cmd_arqhandle &&
8724 8130 (mptsas_check_dma_handle(cmd->cmd_arqhandle) != DDI_SUCCESS)) {
8725 8131 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8726 8132 pkt->pkt_reason = CMD_TRAN_ERR;
8727 8133 pkt->pkt_statistics = 0;
↓ open down ↓ |
54 lines elided |
↑ open up ↑ |
8728 8134 }
8729 8135 if (cmd->cmd_ext_arqhandle &&
8730 8136 (mptsas_check_dma_handle(cmd->cmd_ext_arqhandle) != DDI_SUCCESS)) {
8731 8137 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8732 8138 pkt->pkt_reason = CMD_TRAN_ERR;
8733 8139 pkt->pkt_statistics = 0;
8734 8140 }
8735 8141 }
8736 8142
8737 8143 /*
8738 - * mptsas_doneq_add0 is similar to mptsas_doneq_add except that it is called
8739 - * where m_intr_mutex has already been held.
8144 + * These routines manipulate the queue of commands that
8145 + * are waiting for their completion routines to be called.
8146 + * The queue is usually in FIFO order but on an MP system
8147 + * it's possible for the completion routines to get out
8148 + * of order. If that's a problem you need to add a global
8149 + * mutex around the code that calls the completion routine
8150 + * in the interrupt handler.
8740 8151 */
8741 -static inline void
8742 -mptsas_doneq_add0(mptsas_t *mpt, mptsas_cmd_t *cmd)
8152 +static void
8153 +mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8743 8154 {
8744 8155 struct scsi_pkt *pkt = CMD2PKT(cmd);
8745 8156
8746 - NDBG31(("mptsas_doneq_add0: cmd=0x%p", (void *)cmd));
8157 + NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
8747 8158
8748 8159 ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
8749 8160 cmd->cmd_linkp = NULL;
8750 8161 cmd->cmd_flags |= CFLAG_FINISHED;
8751 8162 cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
8752 8163
8164 + mptsas_fma_check(mpt, cmd);
8165 +
8753 8166 /*
8754 8167 * only add scsi pkts that have completion routines to
8755 8168 * the doneq. no intr cmds do not have callbacks.
8756 8169 */
8757 8170 if (pkt && (pkt->pkt_comp)) {
8758 8171 *mpt->m_donetail = cmd;
8759 8172 mpt->m_donetail = &cmd->cmd_linkp;
8760 8173 mpt->m_doneq_len++;
8761 8174 }
8762 8175 }
8763 8176
8764 -/*
8765 - * These routines manipulate the queue of commands that
8766 - * are waiting for their completion routines to be called.
8767 - * The queue is usually in FIFO order but on an MP system
8768 - * it's possible for the completion routines to get out
8769 - * of order. If that's a problem you need to add a global
8770 - * mutex around the code that calls the completion routine
8771 - * in the interrupt handler.
8772 - */
8773 -static void
8774 -mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8775 -{
8776 - ASSERT(mutex_owned(&mpt->m_mutex));
8777 -
8778 - mptsas_fma_check(mpt, cmd);
8779 -
8780 - mutex_enter(&mpt->m_intr_mutex);
8781 - mptsas_doneq_add0(mpt, cmd);
8782 - mutex_exit(&mpt->m_intr_mutex);
8783 -}
8784 -
8785 8177 static mptsas_cmd_t *
8786 8178 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
8787 8179 {
8788 8180 mptsas_cmd_t *cmd;
8789 8181 mptsas_doneq_thread_list_t *item = &mpt->m_doneq_thread_id[t];
8790 8182
8791 8183 /* pop one off the done queue */
8792 8184 if ((cmd = item->doneq) != NULL) {
8793 8185 /* if the queue is now empty fix the tail pointer */
8794 8186 NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
8795 8187 if ((item->doneq = cmd->cmd_linkp) == NULL) {
8796 8188 item->donetail = &item->doneq;
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
8797 8189 }
8798 8190 cmd->cmd_linkp = NULL;
8799 8191 item->len--;
8800 8192 }
8801 8193 return (cmd);
8802 8194 }
8803 8195
8804 8196 static void
8805 8197 mptsas_doneq_empty(mptsas_t *mpt)
8806 8198 {
8807 - mutex_enter(&mpt->m_intr_mutex);
8808 8199 if (mpt->m_doneq && !mpt->m_in_callback) {
8809 8200 mptsas_cmd_t *cmd, *next;
8810 8201 struct scsi_pkt *pkt;
8811 8202
8812 8203 mpt->m_in_callback = 1;
8813 8204 cmd = mpt->m_doneq;
8814 8205 mpt->m_doneq = NULL;
8815 8206 mpt->m_donetail = &mpt->m_doneq;
8816 8207 mpt->m_doneq_len = 0;
8817 8208
8818 - mutex_exit(&mpt->m_intr_mutex);
8819 -
8209 + mutex_exit(&mpt->m_mutex);
8820 8210 /*
8821 - * ONLY in ISR, is it called without m_mutex held, otherwise,
8822 - * it is always called with m_mutex held.
8823 - */
8824 - if ((curthread->t_flag & T_INTR_THREAD) == 0)
8825 - mutex_exit(&mpt->m_mutex);
8826 - /*
8827 8211 * run the completion routines of all the
8828 8212 * completed commands
8829 8213 */
8830 8214 while (cmd != NULL) {
8831 8215 next = cmd->cmd_linkp;
8832 8216 cmd->cmd_linkp = NULL;
8833 8217 /* run this command's completion routine */
8834 8218 cmd->cmd_flags |= CFLAG_COMPLETED;
8835 8219 pkt = CMD2PKT(cmd);
8836 8220 mptsas_pkt_comp(pkt, cmd);
8837 8221 cmd = next;
8838 8222 }
8839 - if ((curthread->t_flag & T_INTR_THREAD) == 0)
8840 - mutex_enter(&mpt->m_mutex);
8223 + mutex_enter(&mpt->m_mutex);
8841 8224 mpt->m_in_callback = 0;
8842 - return;
8843 8225 }
8844 - mutex_exit(&mpt->m_intr_mutex);
8845 8226 }
8846 8227
8847 8228 /*
8848 8229 * These routines manipulate the target's queue of pending requests
8849 8230 */
8850 8231 void
8851 8232 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
8852 8233 {
8853 8234 NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
8854 8235 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8855 8236 cmd->cmd_queued = TRUE;
8856 8237 if (ptgt)
8857 8238 ptgt->m_t_nwait++;
8858 8239 if (cmd->cmd_pkt_flags & FLAG_HEAD) {
8859 - mutex_enter(&mpt->m_intr_mutex);
8860 8240 if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
8861 8241 mpt->m_waitqtail = &cmd->cmd_linkp;
8862 8242 }
8863 8243 mpt->m_waitq = cmd;
8864 - mutex_exit(&mpt->m_intr_mutex);
8865 8244 } else {
8866 8245 cmd->cmd_linkp = NULL;
8867 8246 *(mpt->m_waitqtail) = cmd;
8868 8247 mpt->m_waitqtail = &cmd->cmd_linkp;
8869 8248 }
8870 8249 }
8871 8250
8872 8251 static mptsas_cmd_t *
8873 8252 mptsas_waitq_rm(mptsas_t *mpt)
8874 8253 {
8875 8254 mptsas_cmd_t *cmd;
8876 8255 mptsas_target_t *ptgt;
8877 8256 NDBG7(("mptsas_waitq_rm"));
8878 8257
8879 - mutex_enter(&mpt->m_intr_mutex);
8880 8258 MPTSAS_WAITQ_RM(mpt, cmd);
8881 - mutex_exit(&mpt->m_intr_mutex);
8882 8259
8883 8260 NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
8884 8261 if (cmd) {
8885 8262 ptgt = cmd->cmd_tgt_addr;
8886 8263 if (ptgt) {
8887 8264 ptgt->m_t_nwait--;
8888 8265 ASSERT(ptgt->m_t_nwait >= 0);
8889 8266 }
8890 8267 }
8891 8268 return (cmd);
8892 8269 }
8893 8270
8894 8271 /*
8895 8272 * remove specified cmd from the middle of the wait queue.
8896 8273 */
8897 8274 static void
8898 8275 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8899 8276 {
8900 8277 mptsas_cmd_t *prevp = mpt->m_waitq;
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
8901 8278 mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
8902 8279
8903 8280 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8904 8281 (void *)mpt, (void *)cmd));
8905 8282 if (ptgt) {
8906 8283 ptgt->m_t_nwait--;
8907 8284 ASSERT(ptgt->m_t_nwait >= 0);
8908 8285 }
8909 8286
8910 8287 if (prevp == cmd) {
8911 - mutex_enter(&mpt->m_intr_mutex);
8912 8288 if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
8913 8289 mpt->m_waitqtail = &mpt->m_waitq;
8914 - mutex_exit(&mpt->m_intr_mutex);
8915 8290
8916 8291 cmd->cmd_linkp = NULL;
8917 8292 cmd->cmd_queued = FALSE;
8918 8293 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8919 8294 (void *)mpt, (void *)cmd));
8920 8295 return;
8921 8296 }
8922 8297
8923 8298 while (prevp != NULL) {
8924 8299 if (prevp->cmd_linkp == cmd) {
8925 8300 if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8926 8301 mpt->m_waitqtail = &prevp->cmd_linkp;
8927 8302
8928 8303 cmd->cmd_linkp = NULL;
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
8929 8304 cmd->cmd_queued = FALSE;
8930 8305 NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
8931 8306 (void *)mpt, (void *)cmd));
8932 8307 return;
8933 8308 }
8934 8309 prevp = prevp->cmd_linkp;
8935 8310 }
8936 8311 cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
8937 8312 }
8938 8313
8314 +static mptsas_cmd_t *
8315 +mptsas_tx_waitq_rm(mptsas_t *mpt)
8316 +{
8317 + mptsas_cmd_t *cmd;
8318 + NDBG7(("mptsas_tx_waitq_rm"));
8319 +
8320 + MPTSAS_TX_WAITQ_RM(mpt, cmd);
8321 +
8322 + NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
8323 +
8324 + return (cmd);
8325 +}
8326 +
8939 8327 /*
8328 + * remove specified cmd from the middle of the tx_waitq.
8329 + */
8330 +static void
8331 +mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
8332 +{
8333 + mptsas_cmd_t *prevp = mpt->m_tx_waitq;
8334 +
8335 + NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8336 + (void *)mpt, (void *)cmd));
8337 +
8338 + if (prevp == cmd) {
8339 + if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
8340 + mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8341 +
8342 + cmd->cmd_linkp = NULL;
8343 + cmd->cmd_queued = FALSE;
8344 + NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8345 + (void *)mpt, (void *)cmd));
8346 + return;
8347 + }
8348 +
8349 + while (prevp != NULL) {
8350 + if (prevp->cmd_linkp == cmd) {
8351 + if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
8352 + mpt->m_tx_waitqtail = &prevp->cmd_linkp;
8353 +
8354 + cmd->cmd_linkp = NULL;
8355 + cmd->cmd_queued = FALSE;
8356 + NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
8357 + (void *)mpt, (void *)cmd));
8358 + return;
8359 + }
8360 + prevp = prevp->cmd_linkp;
8361 + }
8362 + cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
8363 +}
8364 +
8365 +/*
8940 8366 * device and bus reset handling
8941 8367 *
8942 8368 * Notes:
8943 8369 * - RESET_ALL: reset the controller
8944 8370 * - RESET_TARGET: reset the target specified in scsi_address
8945 8371 */
8946 8372 static int
8947 8373 mptsas_scsi_reset(struct scsi_address *ap, int level)
8948 8374 {
8949 8375 mptsas_t *mpt = ADDR2MPT(ap);
8950 8376 int rval;
8951 8377 mptsas_tgt_private_t *tgt_private;
8952 8378 mptsas_target_t *ptgt = NULL;
8953 8379
8954 8380 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
8955 8381 ptgt = tgt_private->t_private;
8956 8382 if (ptgt == NULL) {
8957 8383 return (FALSE);
8958 8384 }
8959 8385 NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
8960 8386 level));
8961 8387
8962 8388 mutex_enter(&mpt->m_mutex);
8963 8389 /*
8964 8390 * if we are not in panic set up a reset delay for this target
8965 8391 */
8966 8392 if (!ddi_in_panic()) {
8967 8393 mptsas_setup_bus_reset_delay(mpt);
8968 8394 } else {
8969 8395 drv_usecwait(mpt->m_scsi_reset_delay * 1000);
8970 8396 }
8971 8397 rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
8972 8398 mutex_exit(&mpt->m_mutex);
8973 8399
8974 8400 /*
8975 8401 * The transport layer expect to only see TRUE and
8976 8402 * FALSE. Therefore, we will adjust the return value
8977 8403 * if mptsas_do_scsi_reset returns FAILED.
8978 8404 */
8979 8405 if (rval == FAILED)
8980 8406 rval = FALSE;
8981 8407 return (rval);
8982 8408 }
8983 8409
8984 8410 static int
8985 8411 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
8986 8412 {
8987 8413 int rval = FALSE;
8988 8414 uint8_t config, disk;
8989 8415 mptsas_slots_t *slots = mpt->m_active;
8990 8416
8991 8417 ASSERT(mutex_owned(&mpt->m_mutex));
8992 8418
8993 8419 if (mptsas_debug_resets) {
8994 8420 mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
8995 8421 devhdl);
8996 8422 }
8997 8423
8998 8424 /*
8999 8425 * Issue a Target Reset message to the target specified but not to a
9000 8426 * disk making up a raid volume. Just look through the RAID config
9001 8427 * Phys Disk list of DevHandles. If the target's DevHandle is in this
9002 8428 * list, then don't reset this target.
9003 8429 */
9004 8430 for (config = 0; config < slots->m_num_raid_configs; config++) {
9005 8431 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
9006 8432 if (devhdl == slots->m_raidconfig[config].
9007 8433 m_physdisk_devhdl[disk]) {
9008 8434 return (TRUE);
9009 8435 }
9010 8436 }
9011 8437 }
9012 8438
9013 8439 rval = mptsas_ioc_task_management(mpt,
9014 8440 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
9015 8441
9016 8442 mptsas_doneq_empty(mpt);
9017 8443 return (rval);
9018 8444 }
9019 8445
9020 8446 static int
9021 8447 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
9022 8448 void (*callback)(caddr_t), caddr_t arg)
9023 8449 {
9024 8450 mptsas_t *mpt = ADDR2MPT(ap);
9025 8451
9026 8452 NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
9027 8453
9028 8454 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
9029 8455 &mpt->m_mutex, &mpt->m_reset_notify_listf));
9030 8456 }
9031 8457
9032 8458 static int
9033 8459 mptsas_get_name(struct scsi_device *sd, char *name, int len)
9034 8460 {
9035 8461 dev_info_t *lun_dip = NULL;
9036 8462
9037 8463 ASSERT(sd != NULL);
9038 8464 ASSERT(name != NULL);
9039 8465 lun_dip = sd->sd_dev;
9040 8466 ASSERT(lun_dip != NULL);
9041 8467
9042 8468 if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
9043 8469 return (1);
9044 8470 } else {
9045 8471 return (0);
9046 8472 }
9047 8473 }
9048 8474
9049 8475 static int
9050 8476 mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
9051 8477 {
9052 8478 return (mptsas_get_name(sd, name, len));
9053 8479 }
9054 8480
9055 8481 void
9056 8482 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
9057 8483 {
9058 8484
9059 8485 NDBG25(("mptsas_set_throttle: throttle=%x", what));
9060 8486
9061 8487 /*
9062 8488 * if the bus is draining/quiesced, no changes to the throttles
9063 8489 * are allowed. Not allowing change of throttles during draining
9064 8490 * limits error recovery but will reduce draining time
9065 8491 *
9066 8492 * all throttles should have been set to HOLD_THROTTLE
9067 8493 */
9068 8494 if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
9069 8495 return;
9070 8496 }
9071 8497
9072 8498 if (what == HOLD_THROTTLE) {
9073 8499 ptgt->m_t_throttle = HOLD_THROTTLE;
9074 8500 } else if (ptgt->m_reset_delay == 0) {
9075 8501 ptgt->m_t_throttle = what;
9076 8502 }
9077 8503 }
9078 8504
9079 8505 /*
9080 8506 * Clean up from a device reset.
9081 8507 * For the case of target reset, this function clears the waitq of all
9082 8508 * commands for a particular target. For the case of abort task set, this
9083 8509 * function clears the waitq of all commands for a particular target/lun.
9084 8510 */
9085 8511 static void
9086 8512 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
9087 8513 {
9088 8514 mptsas_slots_t *slots = mpt->m_active;
9089 8515 mptsas_cmd_t *cmd, *next_cmd;
9090 8516 int slot;
9091 8517 uchar_t reason;
↓ open down ↓ |
142 lines elided |
↑ open up ↑ |
9092 8518 uint_t stat;
9093 8519
9094 8520 NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
9095 8521
9096 8522 /*
9097 8523 * Make sure the I/O Controller has flushed all cmds
9098 8524 * that are associated with this target for a target reset
9099 8525 * and target/lun for abort task set.
9100 8526 * Account for TM requests, which use the last SMID.
9101 8527 */
9102 - mutex_enter(&mpt->m_intr_mutex);
9103 8528 for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
9104 - if ((cmd = slots->m_slot[slot]) == NULL) {
8529 + if ((cmd = slots->m_slot[slot]) == NULL)
9105 8530 continue;
9106 - }
9107 8531 reason = CMD_RESET;
9108 8532 stat = STAT_DEV_RESET;
9109 8533 switch (tasktype) {
9110 8534 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
9111 8535 if (Tgt(cmd) == target) {
8536 + if (cmd->cmd_tgt_addr->m_timeout < 0) {
8537 + /*
8538 + * When timeout requested, propagate
8539 + * proper reason and statistics to
8540 + * target drivers.
8541 + */
8542 + reason = CMD_TIMEOUT;
8543 + stat |= STAT_TIMEOUT;
8544 + }
9112 8545 NDBG25(("mptsas_flush_target discovered non-"
9113 8546 "NULL cmd in slot %d, tasktype 0x%x", slot,
9114 8547 tasktype));
9115 8548 mptsas_dump_cmd(mpt, cmd);
9116 - mptsas_remove_cmd0(mpt, cmd);
8549 + mptsas_remove_cmd(mpt, cmd);
9117 8550 mptsas_set_pkt_reason(mpt, cmd, reason, stat);
9118 - mptsas_doneq_add0(mpt, cmd);
8551 + mptsas_doneq_add(mpt, cmd);
9119 8552 }
9120 8553 break;
9121 8554 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9122 8555 reason = CMD_ABORTED;
9123 8556 stat = STAT_ABORTED;
9124 8557 /*FALLTHROUGH*/
9125 8558 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9126 8559 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9127 8560
9128 8561 NDBG25(("mptsas_flush_target discovered non-"
9129 8562 "NULL cmd in slot %d, tasktype 0x%x", slot,
9130 8563 tasktype));
9131 8564 mptsas_dump_cmd(mpt, cmd);
9132 - mptsas_remove_cmd0(mpt, cmd);
8565 + mptsas_remove_cmd(mpt, cmd);
9133 8566 mptsas_set_pkt_reason(mpt, cmd, reason,
9134 8567 stat);
9135 - mptsas_doneq_add0(mpt, cmd);
8568 + mptsas_doneq_add(mpt, cmd);
9136 8569 }
9137 8570 break;
9138 8571 default:
9139 8572 break;
9140 8573 }
9141 8574 }
9142 - mutex_exit(&mpt->m_intr_mutex);
9143 8575
9144 8576 /*
9145 - * Flush the waitq of this target's cmds
8577 + * Flush the waitq and tx_waitq of this target's cmds
9146 8578 */
9147 8579 cmd = mpt->m_waitq;
9148 8580
9149 8581 reason = CMD_RESET;
9150 8582 stat = STAT_DEV_RESET;
9151 8583
9152 8584 switch (tasktype) {
9153 8585 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
9154 8586 while (cmd != NULL) {
9155 8587 next_cmd = cmd->cmd_linkp;
9156 8588 if (Tgt(cmd) == target) {
9157 8589 mptsas_waitq_delete(mpt, cmd);
9158 8590 mptsas_set_pkt_reason(mpt, cmd,
9159 8591 reason, stat);
9160 8592 mptsas_doneq_add(mpt, cmd);
9161 8593 }
9162 8594 cmd = next_cmd;
9163 8595 }
8596 + mutex_enter(&mpt->m_tx_waitq_mutex);
8597 + cmd = mpt->m_tx_waitq;
8598 + while (cmd != NULL) {
8599 + next_cmd = cmd->cmd_linkp;
8600 + if (Tgt(cmd) == target) {
8601 + mptsas_tx_waitq_delete(mpt, cmd);
8602 + mutex_exit(&mpt->m_tx_waitq_mutex);
8603 + mptsas_set_pkt_reason(mpt, cmd,
8604 + reason, stat);
8605 + mptsas_doneq_add(mpt, cmd);
8606 + mutex_enter(&mpt->m_tx_waitq_mutex);
8607 + }
8608 + cmd = next_cmd;
8609 + }
8610 + mutex_exit(&mpt->m_tx_waitq_mutex);
9164 8611 break;
9165 8612 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9166 8613 reason = CMD_ABORTED;
9167 8614 stat = STAT_ABORTED;
9168 8615 /*FALLTHROUGH*/
9169 8616 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9170 8617 while (cmd != NULL) {
9171 8618 next_cmd = cmd->cmd_linkp;
9172 8619 if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9173 8620 mptsas_waitq_delete(mpt, cmd);
9174 8621 mptsas_set_pkt_reason(mpt, cmd,
9175 8622 reason, stat);
9176 8623 mptsas_doneq_add(mpt, cmd);
9177 8624 }
9178 8625 cmd = next_cmd;
9179 8626 }
8627 + mutex_enter(&mpt->m_tx_waitq_mutex);
8628 + cmd = mpt->m_tx_waitq;
8629 + while (cmd != NULL) {
8630 + next_cmd = cmd->cmd_linkp;
8631 + if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
8632 + mptsas_tx_waitq_delete(mpt, cmd);
8633 + mutex_exit(&mpt->m_tx_waitq_mutex);
8634 + mptsas_set_pkt_reason(mpt, cmd,
8635 + reason, stat);
8636 + mptsas_doneq_add(mpt, cmd);
8637 + mutex_enter(&mpt->m_tx_waitq_mutex);
8638 + }
8639 + cmd = next_cmd;
8640 + }
8641 + mutex_exit(&mpt->m_tx_waitq_mutex);
9180 8642 break;
9181 8643 default:
9182 8644 mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
9183 8645 tasktype);
9184 8646 break;
9185 8647 }
9186 8648 }
9187 8649
9188 8650 /*
9189 8651 * Clean up hba state, abort all outstanding command and commands in waitq
9190 8652 * reset timeout of all targets.
9191 8653 */
9192 8654 static void
9193 8655 mptsas_flush_hba(mptsas_t *mpt)
9194 8656 {
9195 8657 mptsas_slots_t *slots = mpt->m_active;
9196 8658 mptsas_cmd_t *cmd;
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
9197 8659 int slot;
9198 8660
9199 8661 NDBG25(("mptsas_flush_hba"));
9200 8662
9201 8663 /*
9202 8664 * The I/O Controller should have already sent back
9203 8665 * all commands via the scsi I/O reply frame. Make
9204 8666 * sure all commands have been flushed.
9205 8667 * Account for TM request, which use the last SMID.
9206 8668 * Account for TM requests, which use the last SMID.
9207 - mutex_enter(&mpt->m_intr_mutex);
9208 8669 for (slot = 0; slot <= mpt->m_active->m_n_slots; slot++) {
9209 - if ((cmd = slots->m_slot[slot]) == NULL) {
8670 + if ((cmd = slots->m_slot[slot]) == NULL)
9210 8671 continue;
9211 - }
9212 8672
9213 8673 if (cmd->cmd_flags & CFLAG_CMDIOC) {
9214 8674 /*
9215 8675 * Need to make sure to tell everyone that might be
9216 8676 * waiting on this command that it's going to fail. If
9217 8677 * we get here, this command will never timeout because
9218 8678 * the active command table is going to be re-allocated,
9219 8679 * so there will be nothing to check against a time out.
9220 8680 * Instead, mark the command as failed due to reset.
9221 8681 */
9222 8682 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
9223 8683 STAT_BUS_RESET);
9224 8684 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9225 8685 (cmd->cmd_flags & CFLAG_CONFIG) ||
9226 8686 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
9227 8687 cmd->cmd_flags |= CFLAG_FINISHED;
9228 8688 cv_broadcast(&mpt->m_passthru_cv);
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
9229 8689 cv_broadcast(&mpt->m_config_cv);
9230 8690 cv_broadcast(&mpt->m_fw_diag_cv);
9231 8691 }
9232 8692 continue;
9233 8693 }
9234 8694
9235 8695 NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
9236 8696 slot));
9237 8697 mptsas_dump_cmd(mpt, cmd);
9238 8698
9239 - mptsas_remove_cmd0(mpt, cmd);
8699 + mptsas_remove_cmd(mpt, cmd);
9240 8700 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9241 - mptsas_doneq_add0(mpt, cmd);
8701 + mptsas_doneq_add(mpt, cmd);
9242 8702 }
9243 - mutex_exit(&mpt->m_intr_mutex);
9244 8703
9245 8704 /*
9246 8705 * Flush the waitq.
9247 8706 */
9248 8707 while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
9249 8708 mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9250 8709 if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9251 8710 (cmd->cmd_flags & CFLAG_CONFIG) ||
9252 8711 (cmd->cmd_flags & CFLAG_FW_DIAG)) {
9253 8712 cmd->cmd_flags |= CFLAG_FINISHED;
9254 8713 cv_broadcast(&mpt->m_passthru_cv);
9255 8714 cv_broadcast(&mpt->m_config_cv);
9256 8715 cv_broadcast(&mpt->m_fw_diag_cv);
9257 8716 } else {
9258 8717 mptsas_doneq_add(mpt, cmd);
9259 8718 }
9260 8719 }
8720 +
8721 + /*
8722 + * Flush the tx_waitq
8723 + */
8724 + mutex_enter(&mpt->m_tx_waitq_mutex);
8725 + while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
8726 + mutex_exit(&mpt->m_tx_waitq_mutex);
8727 + mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
8728 + mptsas_doneq_add(mpt, cmd);
8729 + mutex_enter(&mpt->m_tx_waitq_mutex);
8730 + }
8731 + mutex_exit(&mpt->m_tx_waitq_mutex);
8732 +
8733 + /*
8734 + * Drain the taskqs prior to reallocating resources.
8735 + */
8736 + mutex_exit(&mpt->m_mutex);
8737 + ddi_taskq_wait(mpt->m_event_taskq);
8738 + ddi_taskq_wait(mpt->m_dr_taskq);
8739 + mutex_enter(&mpt->m_mutex);
9261 8740 }
9262 8741
9263 8742 /*
9264 8743 * set pkt_reason and OR in pkt_statistics flag
9265 8744 */
9266 8745 static void
9267 8746 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9268 8747 uint_t stat)
9269 8748 {
9270 8749 #ifndef __lock_lint
9271 8750 _NOTE(ARGUNUSED(mpt))
9272 8751 #endif
9273 8752
9274 8753 NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9275 8754 (void *)cmd, reason, stat));
9276 8755
9277 8756 if (cmd) {
9278 8757 if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9279 8758 cmd->cmd_pkt->pkt_reason = reason;
9280 8759 }
9281 8760 cmd->cmd_pkt->pkt_statistics |= stat;
9282 8761 }
9283 8762 }
9284 8763
9285 8764 static void
9286 8765 mptsas_start_watch_reset_delay()
9287 8766 {
9288 8767 NDBG22(("mptsas_start_watch_reset_delay"));
9289 8768
9290 8769 mutex_enter(&mptsas_global_mutex);
9291 8770 if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
9292 8771 mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
9293 8772 drv_usectohz((clock_t)
9294 8773 MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
9295 8774 ASSERT(mptsas_reset_watch != NULL);
9296 8775 }
9297 8776 mutex_exit(&mptsas_global_mutex);
9298 8777 }
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
9299 8778
9300 8779 static void
9301 8780 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
9302 8781 {
9303 8782 mptsas_target_t *ptgt = NULL;
9304 8783
9305 8784 NDBG22(("mptsas_setup_bus_reset_delay"));
9306 8785 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
9307 8786 MPTSAS_HASH_FIRST);
9308 8787 while (ptgt != NULL) {
9309 - mutex_enter(&ptgt->m_tgt_intr_mutex);
9310 8788 mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9311 8789 ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
9312 - mutex_exit(&ptgt->m_tgt_intr_mutex);
9313 8790
9314 8791 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
9315 8792 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
9316 8793 }
9317 8794
9318 8795 mptsas_start_watch_reset_delay();
9319 8796 }
9320 8797
9321 8798 /*
9322 8799 * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9323 8800 * mpt instance for active reset delays
9324 8801 */
9325 8802 static void
9326 8803 mptsas_watch_reset_delay(void *arg)
9327 8804 {
9328 8805 #ifndef __lock_lint
9329 8806 _NOTE(ARGUNUSED(arg))
9330 8807 #endif
9331 8808
9332 8809 mptsas_t *mpt;
9333 8810 int not_done = 0;
9334 8811
9335 8812 NDBG22(("mptsas_watch_reset_delay"));
9336 8813
9337 8814 mutex_enter(&mptsas_global_mutex);
9338 8815 mptsas_reset_watch = 0;
9339 8816 mutex_exit(&mptsas_global_mutex);
9340 8817 rw_enter(&mptsas_global_rwlock, RW_READER);
9341 8818 for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9342 8819 if (mpt->m_tran == 0) {
9343 8820 continue;
9344 8821 }
9345 8822 mutex_enter(&mpt->m_mutex);
9346 8823 not_done += mptsas_watch_reset_delay_subr(mpt);
9347 8824 mutex_exit(&mpt->m_mutex);
9348 8825 }
9349 8826 rw_exit(&mptsas_global_rwlock);
9350 8827
9351 8828 if (not_done) {
9352 8829 mptsas_start_watch_reset_delay();
9353 8830 }
9354 8831 }
9355 8832
/*
 * Per-instance worker for mptsas_watch_reset_delay(): age the reset delay
 * of every target and restore full throttle on those whose delay expired.
 * Returns -1 if at least one target is still delaying (caller must keep
 * the watch armed), 0 otherwise.  Must be called with m_mutex held.
 */
static int
mptsas_watch_reset_delay_subr(mptsas_t *mpt)
{
	int		done = 0;
	int		restart = 0;
	mptsas_target_t	*ptgt = NULL;

	NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));

	ASSERT(mutex_owned(&mpt->m_mutex));

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		if (ptgt->m_reset_delay != 0) {
			/* Age the delay by one watch tick. */
			ptgt->m_reset_delay -=
			    MPTSAS_WATCH_RESET_DELAY_TICK;
			if (ptgt->m_reset_delay <= 0) {
				/* Delay expired: reopen the throttle. */
				ptgt->m_reset_delay = 0;
				mptsas_set_throttle(mpt, ptgt,
				    MAX_THROTTLE);
				restart++;
			} else {
				done = -1;
			}
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* Kick the HBA if any target was re-enabled above. */
	if (restart > 0) {
		mptsas_restart_hba(mpt);
	}
	return (done);
}
9394 8869
#ifdef MPTSAS_TEST
/*
 * Test hook: when the global mptsas_rtest selects this target, issue a
 * SCSI reset to it and clear the trigger once the reset succeeds.
 * (Fix: removed the unused local "ptgt", which drew a compiler warning.)
 */
static void
mptsas_test_reset(mptsas_t *mpt, int target)
{
	if (mptsas_rtest == target) {
		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
			mptsas_rtest = -1;
		}
		if (mptsas_rtest == -1) {
			NDBG22(("mptsas_test_reset success"));
		}
	}
}
#endif
9411 8886
9412 8887 /*
9413 8888 * abort handling:
9414 8889 *
9415 8890 * Notes:
9416 8891 * - if pkt is not NULL, abort just that command
9417 8892 * - if pkt is NULL, abort all outstanding commands for target
9418 8893 */
9419 8894 static int
9420 8895 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9421 8896 {
9422 8897 mptsas_t *mpt = ADDR2MPT(ap);
9423 8898 int rval;
9424 8899 mptsas_tgt_private_t *tgt_private;
9425 8900 int target, lun;
9426 8901
9427 8902 tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9428 8903 tran_tgt_private;
9429 8904 ASSERT(tgt_private != NULL);
9430 8905 target = tgt_private->t_private->m_devhdl;
9431 8906 lun = tgt_private->t_lun;
9432 8907
9433 8908 NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9434 8909
9435 8910 mutex_enter(&mpt->m_mutex);
9436 8911 rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9437 8912 mutex_exit(&mpt->m_mutex);
9438 8913 return (rval);
9439 8914 }
9440 8915
/*
 * Abort the command pkt on target/lun, or all outstanding commands on
 * that target/lun when pkt is NULL.  Returns TRUE on success, FALSE
 * otherwise.  Must be called with m_mutex held.
 */
static int
mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
{
	mptsas_cmd_t	*sp = NULL;
	mptsas_slots_t	*slots = mpt->m_active;
	int		rval = FALSE;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Abort the command pkt on the target/lun in ap.  If pkt is
	 * NULL, abort all outstanding commands on that target/lun.
	 * If you can abort them, return 1, else return 0.
	 * Each packet that's aborted should be sent back to the target
	 * driver through the callback routine, with pkt_reason set to
	 * CMD_ABORTED.
	 *
	 * abort cmd pkt on HBA hardware; clean out of outstanding
	 * command lists, etc.
	 */
	if (pkt != NULL) {
		/* abort the specified packet */
		sp = PKT2CMD(pkt);

		/*
		 * A command still on the wait queue was never issued to
		 * the IOC, so it can be completed locally without any
		 * firmware involvement.
		 */
		if (sp->cmd_queued) {
			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
			    (void *)sp));
			mptsas_waitq_delete(mpt, sp);
			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
			    STAT_ABORTED);
			mptsas_doneq_add(mpt, sp);
			rval = TRUE;
			goto done;
		}

		/*
		 * Have mpt firmware abort this command
		 */

		if (slots->m_slot[sp->cmd_slot] != NULL) {
			rval = mptsas_ioc_task_management(mpt,
			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
			    lun, NULL, 0, 0);

			/*
			 * The transport layer expects only TRUE and FALSE.
			 * Therefore, if mptsas_ioc_task_management returns
			 * FAILED we will return FALSE.
			 */
			if (rval == FAILED)
				rval = FALSE;
			goto done;
		}
	}

	/*
	 * If pkt is NULL then abort task set
	 */
	rval = mptsas_ioc_task_management(mpt,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);

	/*
	 * The transport layer expects only TRUE and FALSE.
	 * Therefore, if mptsas_ioc_task_management returns
	 * FAILED we will return FALSE.
	 */
	if (rval == FAILED)
		rval = FALSE;

#ifdef MPTSAS_TEST
	if (rval && mptsas_test_stop) {
		debug_enter("mptsas_do_scsi_abort");
	}
#endif

done:
	/* Flush anything moved to the done queue above. */
	mptsas_doneq_empty(mpt);
	return (rval);
}
9522 8995
9523 8996 /*
9524 8997 * capability handling:
9525 8998 * (*tran_getcap). Get the capability named, and return its value.
9526 8999 */
9527 9000 static int
9528 9001 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9529 9002 {
9530 9003 mptsas_t *mpt = ADDR2MPT(ap);
9531 9004 int ckey;
9532 9005 int rval = FALSE;
9533 9006
9534 9007 NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9535 9008 ap->a_target, cap, tgtonly));
9536 9009
9537 9010 mutex_enter(&mpt->m_mutex);
9538 9011
9539 9012 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9540 9013 mutex_exit(&mpt->m_mutex);
9541 9014 return (UNDEFINED);
9542 9015 }
9543 9016
9544 9017 switch (ckey) {
9545 9018 case SCSI_CAP_DMA_MAX:
9546 9019 rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9547 9020 break;
9548 9021 case SCSI_CAP_ARQ:
9549 9022 rval = TRUE;
9550 9023 break;
9551 9024 case SCSI_CAP_MSG_OUT:
9552 9025 case SCSI_CAP_PARITY:
9553 9026 case SCSI_CAP_UNTAGGED_QING:
9554 9027 rval = TRUE;
9555 9028 break;
9556 9029 case SCSI_CAP_TAGGED_QING:
9557 9030 rval = TRUE;
9558 9031 break;
9559 9032 case SCSI_CAP_RESET_NOTIFICATION:
9560 9033 rval = TRUE;
9561 9034 break;
9562 9035 case SCSI_CAP_LINKED_CMDS:
9563 9036 rval = FALSE;
9564 9037 break;
9565 9038 case SCSI_CAP_QFULL_RETRIES:
9566 9039 rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9567 9040 tran_tgt_private))->t_private->m_qfull_retries;
9568 9041 break;
9569 9042 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9570 9043 rval = drv_hztousec(((mptsas_tgt_private_t *)
9571 9044 (ap->a_hba_tran->tran_tgt_private))->
9572 9045 t_private->m_qfull_retry_interval) / 1000;
9573 9046 break;
9574 9047 case SCSI_CAP_CDB_LEN:
9575 9048 rval = CDB_GROUP4;
9576 9049 break;
9577 9050 case SCSI_CAP_INTERCONNECT_TYPE:
9578 9051 rval = INTERCONNECT_SAS;
9579 9052 break;
9580 9053 case SCSI_CAP_TRAN_LAYER_RETRIES:
9581 9054 if (mpt->m_ioc_capabilities &
9582 9055 MPI2_IOCFACTS_CAPABILITY_TLR)
9583 9056 rval = TRUE;
9584 9057 else
9585 9058 rval = FALSE;
9586 9059 break;
9587 9060 default:
9588 9061 rval = UNDEFINED;
9589 9062 break;
9590 9063 }
9591 9064
9592 9065 NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9593 9066
9594 9067 mutex_exit(&mpt->m_mutex);
9595 9068 return (rval);
9596 9069 }
↓ open down ↓ |
91 lines elided |
↑ open up ↑ |
9597 9070
9598 9071 /*
9599 9072 * (*tran_setcap). Set the capability named to the value given.
9600 9073 */
9601 9074 static int
9602 9075 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9603 9076 {
9604 9077 mptsas_t *mpt = ADDR2MPT(ap);
9605 9078 int ckey;
9606 9079 int rval = FALSE;
9607 - mptsas_target_t *ptgt;
9608 9080
9609 9081 NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9610 9082 ap->a_target, cap, value, tgtonly));
9611 9083
9612 9084 if (!tgtonly) {
9613 9085 return (rval);
9614 9086 }
9615 9087
9616 9088 mutex_enter(&mpt->m_mutex);
9617 9089
9618 9090 if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9619 9091 mutex_exit(&mpt->m_mutex);
9620 9092 return (UNDEFINED);
9621 9093 }
9622 9094
9623 9095 switch (ckey) {
9624 9096 case SCSI_CAP_DMA_MAX:
9625 9097 case SCSI_CAP_MSG_OUT:
9626 9098 case SCSI_CAP_PARITY:
9627 9099 case SCSI_CAP_INITIATOR_ID:
9628 9100 case SCSI_CAP_LINKED_CMDS:
9629 9101 case SCSI_CAP_UNTAGGED_QING:
9630 9102 case SCSI_CAP_RESET_NOTIFICATION:
9631 9103 /*
9632 9104 * None of these are settable via
9633 9105 * the capability interface.
9634 9106 */
9635 9107 break;
9636 9108 case SCSI_CAP_ARQ:
↓ open down ↓ |
19 lines elided |
↑ open up ↑ |
9637 9109 /*
9638 9110 * We cannot turn off arq so return false if asked to
9639 9111 */
9640 9112 if (value) {
9641 9113 rval = TRUE;
9642 9114 } else {
9643 9115 rval = FALSE;
9644 9116 }
9645 9117 break;
9646 9118 case SCSI_CAP_TAGGED_QING:
9647 - ptgt = ((mptsas_tgt_private_t *)
9648 - (ap->a_hba_tran->tran_tgt_private))->t_private;
9649 - mutex_enter(&ptgt->m_tgt_intr_mutex);
9650 - mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
9651 - mutex_exit(&ptgt->m_tgt_intr_mutex);
9119 + mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
9120 + (ap->a_hba_tran->tran_tgt_private))->t_private,
9121 + MAX_THROTTLE);
9652 9122 rval = TRUE;
9653 9123 break;
9654 9124 case SCSI_CAP_QFULL_RETRIES:
9655 9125 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9656 9126 t_private->m_qfull_retries = (uchar_t)value;
9657 9127 rval = TRUE;
9658 9128 break;
9659 9129 case SCSI_CAP_QFULL_RETRY_INTERVAL:
9660 9130 ((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
9661 9131 t_private->m_qfull_retry_interval =
9662 9132 drv_usectohz(value * 1000);
9663 9133 rval = TRUE;
9664 9134 break;
9665 9135 default:
9666 9136 rval = UNDEFINED;
9667 9137 break;
9668 9138 }
9669 9139 mutex_exit(&mpt->m_mutex);
9670 9140 return (rval);
9671 9141 }
9672 9142
9673 9143 /*
9674 9144 * Utility routine for mptsas_ifsetcap/ifgetcap
9675 9145 */
9676 9146 /*ARGSUSED*/
9677 9147 static int
9678 9148 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
9679 9149 {
9680 9150 NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
9681 9151
9682 9152 if (!cap)
9683 9153 return (FALSE);
9684 9154
↓ open down ↓ |
23 lines elided |
↑ open up ↑ |
9685 9155 *cidxp = scsi_hba_lookup_capstr(cap);
9686 9156 return (TRUE);
9687 9157 }
9688 9158
/*
 * (Re)allocate the active-slots array sized for the current IOC limits.
 * Returns 0 on success, -1 on allocation failure.  Any existing array's
 * target table, SMP table and RAID configuration are carried over before
 * the old array is freed.
 */
static int
mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
{
	mptsas_slots_t	*old_active = mpt->m_active;
	mptsas_slots_t	*new_active;
	size_t		size;
	int		rval = -1, i;

	/*
	 * if there are active commands, then we cannot
	 * change size of active slots array.
	 */
	ASSERT(mpt->m_ncmds == 0);

	size = MPTSAS_SLOTS_SIZE(mpt);
	new_active = kmem_zalloc(size, flag);
	if (new_active == NULL) {
		/* Only possible when flag is KM_NOSLEEP. */
		NDBG1(("new active alloc failed"));
		return (rval);
	}
	/*
	 * Since SMID 0 is reserved and the TM slot is reserved, the
	 * number of slots that can be used at any one time is
	 * m_max_requests - 2.
	 */
	new_active->m_n_slots = (mpt->m_max_requests - 2);
	new_active->m_size = size;
	new_active->m_tags = 1;
	if (old_active) {
		/* Preserve lookup tables and RAID state across the swap. */
		new_active->m_tgttbl = old_active->m_tgttbl;
		new_active->m_smptbl = old_active->m_smptbl;
		new_active->m_num_raid_configs =
		    old_active->m_num_raid_configs;
		for (i = 0; i < new_active->m_num_raid_configs; i++) {
			new_active->m_raidconfig[i] =
			    old_active->m_raidconfig[i];
		}
		mptsas_free_active_slots(mpt);
	}
	mpt->m_active = new_active;
	rval = 0;

	return (rval);
}
9790 9203
9791 9204 static void
9792 9205 mptsas_free_active_slots(mptsas_t *mpt)
9793 9206 {
9794 9207 mptsas_slots_t *active = mpt->m_active;
9795 9208 size_t size;
9796 - mptsas_slot_free_e_t *pe;
9797 - int i;
9798 9209
9799 9210 if (active == NULL)
9800 9211 return;
9801 -
9802 - if (mpt->m_slot_freeq_pairp) {
9803 - for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) {
9804 - while ((pe = list_head(&mpt->m_slot_freeq_pairp
9805 - [i].m_slot_allocq.s.m_fq_list)) != NULL) {
9806 - list_remove(&mpt->m_slot_freeq_pairp[i]
9807 - .m_slot_allocq.s.m_fq_list, pe);
9808 - }
9809 - list_destroy(&mpt->m_slot_freeq_pairp
9810 - [i].m_slot_allocq.s.m_fq_list);
9811 - while ((pe = list_head(&mpt->m_slot_freeq_pairp
9812 - [i].m_slot_releq.s.m_fq_list)) != NULL) {
9813 - list_remove(&mpt->m_slot_freeq_pairp[i]
9814 - .m_slot_releq.s.m_fq_list, pe);
9815 - }
9816 - list_destroy(&mpt->m_slot_freeq_pairp
9817 - [i].m_slot_releq.s.m_fq_list);
9818 - mutex_destroy(&mpt->m_slot_freeq_pairp
9819 - [i].m_slot_allocq.s.m_fq_mutex);
9820 - mutex_destroy(&mpt->m_slot_freeq_pairp
9821 - [i].m_slot_releq.s.m_fq_mutex);
9822 - }
9823 - kmem_free(mpt->m_slot_freeq_pairp, mpt->m_slot_freeq_pair_n *
9824 - sizeof (mptsas_slot_freeq_pair_t));
9825 - }
9826 - if (mpt->m_slot_free_ae)
9827 - kmem_free(mpt->m_slot_free_ae, mpt->m_active->m_n_slots *
9828 - sizeof (mptsas_slot_free_e_t));
9829 -
9830 - if (mpt->m_reply)
9831 - kmem_free(mpt->m_reply, MPI_ADDRESS_COALSCE_MAX *
9832 - sizeof (Mpi2ReplyDescriptorsUnion_t));
9833 -
9834 9212 size = active->m_size;
9835 9213 kmem_free(active, size);
9836 9214 mpt->m_active = NULL;
9837 9215 }
9838 9216
9839 9217 /*
9840 9218 * Error logging, printing, and debug print routines.
9841 9219 */
9842 9220 static char *mptsas_label = "mpt_sas";
9843 9221
/*
 * Format a message into the shared mptsas_log_buf (serialized by
 * mptsas_log_mutex) and emit it via scsi_log(), tagged with the
 * instance's dip when one is available.
 */
/*PRINTFLIKE3*/
void
mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
{
	dev_info_t	*dev;
	va_list	ap;

	if (mpt) {
		dev = mpt->m_dip;
	} else {
		/* No instance context: log without a device node. */
		dev = 0;
	}

	mutex_enter(&mptsas_log_mutex);

	/*
	 * NOTE(review): vsprintf is unbounded — this assumes every
	 * formatted message fits in mptsas_log_buf; consider vsnprintf.
	 */
	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

	if (level == CE_CONT) {
		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
	} else {
		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
	}

	mutex_exit(&mptsas_log_mutex);
}
9871 9249
#ifdef MPTSAS_DEBUG
/*
 * Debug-build-only printf: formats into the shared mptsas_log_buf under
 * mptsas_log_mutex and emits via prom_printf() when PROM_PRINTF is
 * defined, otherwise via scsi_log() at SCSI_DEBUG level.
 */
/*PRINTFLIKE1*/
void
mptsas_printf(char *fmt, ...)
{
	dev_info_t	*dev = 0;
	va_list	ap;

	mutex_enter(&mptsas_log_mutex);

	va_start(ap, fmt);
	(void) vsprintf(mptsas_log_buf, fmt, ap);
	va_end(ap);

#ifdef PROM_PRINTF
	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
#else
	scsi_log(dev, mptsas_label, SCSI_DEBUG, "%s\n", mptsas_log_buf);
#endif
	mutex_exit(&mptsas_log_mutex);
}
#endif
9894 9272
9895 9273 /*
9896 9274 * timeout handling
9897 9275 */
9898 9276 static void
9899 9277 mptsas_watch(void *arg)
9900 9278 {
9901 9279 #ifndef __lock_lint
9902 9280 _NOTE(ARGUNUSED(arg))
9903 9281 #endif
9904 9282
9905 9283 mptsas_t *mpt;
9906 9284 uint32_t doorbell;
9907 9285
9908 9286 NDBG30(("mptsas_watch"));
9909 9287
9910 9288 rw_enter(&mptsas_global_rwlock, RW_READER);
9911 9289 for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
9912 9290
9913 9291 mutex_enter(&mpt->m_mutex);
9914 9292
9915 9293 /* Skip device if not powered on */
9916 9294 if (mpt->m_options & MPTSAS_OPT_PM) {
9917 9295 if (mpt->m_power_level == PM_LEVEL_D0) {
9918 9296 (void) pm_busy_component(mpt->m_dip, 0);
9919 9297 mpt->m_busy = 1;
9920 9298 } else {
9921 9299 mutex_exit(&mpt->m_mutex);
9922 9300 continue;
9923 9301 }
9924 9302 }
9925 9303
9926 9304 /*
9927 9305 * Check if controller is in a FAULT state. If so, reset it.
9928 9306 */
9929 9307 doorbell = ddi_get32(mpt->m_datap, &mpt->m_reg->Doorbell);
9930 9308 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
9931 9309 doorbell &= MPI2_DOORBELL_DATA_MASK;
9932 9310 mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
9933 9311 "code: %04x", doorbell);
9934 9312 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
9935 9313 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
9936 9314 mptsas_log(mpt, CE_WARN, "Reset failed"
9937 9315 "after fault was detected");
9938 9316 }
9939 9317 }
9940 9318
9941 9319 /*
9942 9320 * For now, always call mptsas_watchsubr.
9943 9321 */
9944 9322 mptsas_watchsubr(mpt);
9945 9323
9946 9324 if (mpt->m_options & MPTSAS_OPT_PM) {
9947 9325 mpt->m_busy = 0;
9948 9326 (void) pm_idle_component(mpt->m_dip, 0);
9949 9327 }
9950 9328
9951 9329 mutex_exit(&mpt->m_mutex);
9952 9330 }
9953 9331 rw_exit(&mptsas_global_rwlock);
9954 9332
9955 9333 mutex_enter(&mptsas_global_mutex);
9956 9334 if (mptsas_timeouts_enabled)
9957 9335 mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
9958 9336 mutex_exit(&mptsas_global_mutex);
9959 9337 }
9960 9338
/*
 * Per-instance watchdog body, run from mptsas_watch() with m_mutex held.
 * Ages per-command timeouts in the active slots, then walks all targets
 * to recover from qfull draining and to detect command timeouts; targets
 * that time out repeatedly within mptsas_timeout_interval are taken
 * offline via mptsas_kill_target().
 */
static void
mptsas_watchsubr(mptsas_t *mpt)
{
	int		i;
	mptsas_cmd_t	*cmd;
	mptsas_target_t	*ptgt = NULL;

	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));

#ifdef MPTSAS_TEST
	if (mptsas_enable_untagged) {
		mptsas_test_untagged++;
	}
#endif

	/*
	 * Check for commands stuck in active slot
	 * Account for TM requests, which use the last SMID.
	 */
	for (i = 0; i <= mpt->m_active->m_n_slots; i++) {
		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
			if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * There seems to be a command stuck
					 * in the active slot. Drain throttle.
					 */
					mptsas_set_throttle(mpt,
					    cmd->cmd_tgt_addr,
					    DRAIN_THROTTLE);
				}
			}
			if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
			    (cmd->cmd_flags & CFLAG_CONFIG) ||
			    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
				cmd->cmd_active_timeout -=
				    mptsas_scsi_watchdog_tick;
				if (cmd->cmd_active_timeout <= 0) {
					/*
					 * passthrough command timeout
					 */
					cmd->cmd_flags |= (CFLAG_FINISHED |
					    CFLAG_TIMEOUT);
					/* Wake every waiter class. */
					cv_broadcast(&mpt->m_passthru_cv);
					cv_broadcast(&mpt->m_config_cv);
					cv_broadcast(&mpt->m_fw_diag_cv);
				}
			}
		}
	}

	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		/*
		 * If we were draining due to a qfull condition,
		 * go back to full throttle.
		 */
		if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
		    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
		    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
			mptsas_restart_hba(mpt);
		}

		if ((ptgt->m_t_ncmds > 0) &&
		    (ptgt->m_timebase)) {

			if (ptgt->m_timebase <=
			    mptsas_scsi_watchdog_tick) {
				ptgt->m_timebase +=
				    mptsas_scsi_watchdog_tick;
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			ptgt->m_timeout -= mptsas_scsi_watchdog_tick;

			/*
			 * Age the timeout-burst window: once a target has
			 * timed out, count elapsed ticks and forget the
			 * burst entirely after mptsas_timeout_interval.
			 */
			if (ptgt->m_timeout_count > 0) {
				ptgt->m_timeout_interval +=
				    mptsas_scsi_watchdog_tick;
			}
			if (ptgt->m_timeout_interval >
			    mptsas_timeout_interval) {
				ptgt->m_timeout_interval = 0;
				ptgt->m_timeout_count = 0;
			}

			if (ptgt->m_timeout < 0) {
				/*
				 * Too many timeouts in quick succession
				 * means the target itself is sick: take it
				 * offline instead of resetting it again.
				 */
				ptgt->m_timeout_count++;
				if (ptgt->m_timeout_count >
				    mptsas_timeout_threshold) {
					ptgt->m_timeout_count = 0;
					mptsas_kill_target(mpt, ptgt);
				} else {
					mptsas_cmd_timeout(mpt, ptgt->m_devhdl);
				}
				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
				continue;
			}

			if ((ptgt->m_timeout) <=
			    mptsas_scsi_watchdog_tick) {
				NDBG23(("pending timeout"));
				mptsas_set_throttle(mpt, ptgt,
				    DRAIN_THROTTLE);
			}
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
}
10074 9456
/*
 * timeout recovery
 *
 * A disconnected command to devhdl has timed out: log it and attempt a
 * target-level reset to recover.
 */
static void
mptsas_cmd_timeout(mptsas_t *mpt, uint16_t devhdl)
{

	NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
	mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
	    "Target %d", devhdl);

	/*
	 * Reset the target in the hope of flushing the stuck command;
	 * failure here is only logged — the watchdog will fire again.
	 */
	NDBG29(("mptsas_cmd_timeout: device reset"));
	if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
		mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
		    "recovery failed!", devhdl);
	}
}
10096 9478
10097 9479 /*
9480 + * target causing too many timeouts
9481 + */
9482 +static void
9483 +mptsas_kill_target(mptsas_t *mpt, mptsas_target_t *ptgt)
9484 +{
9485 + mptsas_topo_change_list_t *topo_node = NULL;
9486 +
9487 + NDBG29(("mptsas_tgt_kill: target=%d", ptgt->m_devhdl));
9488 + mptsas_log(mpt, CE_WARN, "timeout threshold exceeded for "
9489 + "Target %d", ptgt->m_devhdl);
9490 +
9491 + topo_node = kmem_zalloc(sizeof (mptsas_topo_change_list_t), KM_SLEEP);
9492 + topo_node->mpt = mpt;
9493 + topo_node->un.phymask = ptgt->m_phymask;
9494 + topo_node->event = MPTSAS_DR_EVENT_OFFLINE_TARGET;
9495 + topo_node->devhdl = ptgt->m_devhdl;
9496 + if (ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
9497 + topo_node->flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
9498 + else
9499 + topo_node->flags = MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
9500 + topo_node->object = NULL;
9501 +
9502 + /*
9503 + * Launch DR taskq to fake topology change
9504 + */
9505 + if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
9506 + mptsas_handle_dr, (void *)topo_node,
9507 + DDI_NOSLEEP)) != DDI_SUCCESS) {
9508 + mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
9509 + "for fake offline event failed. \n");
9510 + }
9511 +}
9512 +
9513 +/*
10098 9514 * Device / Hotplug control
10099 9515 */
10100 9516 static int
10101 9517 mptsas_scsi_quiesce(dev_info_t *dip)
10102 9518 {
10103 9519 mptsas_t *mpt;
10104 9520 scsi_hba_tran_t *tran;
10105 9521
10106 9522 tran = ddi_get_driver_private(dip);
10107 9523 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10108 9524 return (-1);
10109 9525
10110 9526 return (mptsas_quiesce_bus(mpt));
10111 9527 }
10112 9528
10113 9529 static int
10114 9530 mptsas_scsi_unquiesce(dev_info_t *dip)
10115 9531 {
10116 9532 mptsas_t *mpt;
10117 9533 scsi_hba_tran_t *tran;
10118 9534
10119 9535 tran = ddi_get_driver_private(dip);
10120 9536 if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10121 9537 return (-1);
10122 9538
10123 9539 return (mptsas_unquiesce_bus(mpt));
10124 9540 }
10125 9541
/*
 * Quiesce the bus: hold every target's throttle, then wait (interruptibly)
 * for outstanding commands to drain.  Returns 0 once quiesced, -1 if the
 * wait was interrupted (throttles are restored in that case).
 */
static int
mptsas_quiesce_bus(mptsas_t *mpt)
{
	mptsas_target_t	*ptgt = NULL;

	NDBG28(("mptsas_quiesce_bus"));
	mutex_enter(&mpt->m_mutex);

	/* Set all the throttles to zero */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* If there are any outstanding commands in the queue */
	if (mpt->m_ncmds) {
		/*
		 * Arm a periodic drain check that will cv_signal us when
		 * the command count reaches zero, then wait for it.
		 */
		mpt->m_softstate |= MPTSAS_SS_DRAINING;
		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
			/*
			 * Quiesce has been interrupted
			 */
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			/* Restore full throttle on every target. */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}
			mptsas_restart_hba(mpt);
			/* Cancel the drain check without holding m_mutex. */
			if (mpt->m_quiesce_timeid != 0) {
				timeout_id_t tid = mpt->m_quiesce_timeid;
				mpt->m_quiesce_timeid = 0;
				mutex_exit(&mpt->m_mutex);
				(void) untimeout(tid);
				return (-1);
			}
			mutex_exit(&mpt->m_mutex);
			return (-1);
		} else {
			/* Bus has been quiesced */
			ASSERT(mpt->m_quiesce_timeid == 0);
			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
			mutex_exit(&mpt->m_mutex);
			return (0);
		}
	}
	/* Bus was not busy - QUIESCED */
	mutex_exit(&mpt->m_mutex);

	return (0);
}
10193 9602
10194 9603 static int
10195 9604 mptsas_unquiesce_bus(mptsas_t *mpt)
10196 9605 {
10197 9606 mptsas_target_t *ptgt = NULL;
10198 9607
10199 9608 NDBG28(("mptsas_unquiesce_bus"));
10200 9609 mutex_enter(&mpt->m_mutex);
10201 9610 mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10202 9611 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
10203 9612 MPTSAS_HASH_FIRST);
10204 9613 while (ptgt != NULL) {
10205 - mutex_enter(&ptgt->m_tgt_intr_mutex);
10206 9614 mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10207 - mutex_exit(&ptgt->m_tgt_intr_mutex);
10208 9615
10209 9616 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
10210 9617 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
10211 9618 }
10212 9619 mptsas_restart_hba(mpt);
10213 9620 mutex_exit(&mpt->m_mutex);
10214 9621 return (0);
10215 9622 }
10216 9623
/*
 * timeout() callback armed by mptsas_quiesce_bus(): signal the quiescing
 * thread once the command count drains to zero; otherwise re-hold every
 * target's throttle (a bus reset may have reopened them) and re-arm.
 */
static void
mptsas_ncmds_checkdrain(void *arg)
{
	mptsas_t	*mpt = arg;
	mptsas_target_t	*ptgt = NULL;

	mutex_enter(&mpt->m_mutex);
	/* Only act while a quiesce is actually in progress. */
	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
		mpt->m_quiesce_timeid = 0;
		if (mpt->m_ncmds == 0) {
			/* Command queue has been drained */
			cv_signal(&mpt->m_cv);
		} else {
			/*
			 * The throttle may have been reset because
			 * of a SCSI bus reset
			 */
			ptgt = (mptsas_target_t *)mptsas_hash_traverse(
			    &mpt->m_active->m_tgttbl, MPTSAS_HASH_FIRST);
			while (ptgt != NULL) {
				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

				ptgt = (mptsas_target_t *)mptsas_hash_traverse(
				    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
			}

			/* Check again after another quiesce interval. */
			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
			    drv_usectohz(1000000)));
		}
	}
	mutex_exit(&mpt->m_mutex);
}
10255 9657
10256 9658 /*ARGSUSED*/
10257 9659 static void
10258 9660 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10259 9661 {
10260 9662 int i;
10261 9663 uint8_t *cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10262 9664 char buf[128];
10263 9665
10264 9666 buf[0] = '\0';
10265 9667 NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10266 9668 Tgt(cmd), Lun(cmd)));
10267 9669 (void) sprintf(&buf[0], "\tcdb=[");
10268 9670 for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10269 9671 (void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10270 9672 }
10271 9673 (void) sprintf(&buf[strlen(buf)], " ]");
10272 9674 NDBG25(("?%s\n", buf));
10273 9675 NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10274 9676 cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10275 9677 cmd->cmd_pkt->pkt_state));
10276 9678 NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10277 9679 *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10278 9680 }
10279 9681
10280 9682 static void
10281 9683 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
10282 9684 {
10283 9685 caddr_t memp;
10284 9686 pMPI2RequestHeader_t request_hdrp;
10285 9687 struct scsi_pkt *pkt = cmd->cmd_pkt;
10286 9688 mptsas_pt_request_t *pt = pkt->pkt_ha_private;
10287 9689 uint32_t request_size, data_size, dataout_size;
10288 9690 uint32_t direction;
10289 9691 ddi_dma_cookie_t data_cookie;
10290 9692 ddi_dma_cookie_t dataout_cookie;
10291 9693 uint32_t request_desc_low, request_desc_high = 0;
10292 9694 uint32_t i, sense_bufp;
10293 9695 uint8_t desc_type;
10294 9696 uint8_t *request, function;
10295 9697 ddi_dma_handle_t dma_hdl = mpt->m_dma_req_frame_hdl;
10296 9698 ddi_acc_handle_t acc_hdl = mpt->m_acc_req_frame_hdl;
10297 9699
10298 9700 desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10299 9701
10300 9702 request = pt->request;
10301 9703 direction = pt->direction;
10302 9704 request_size = pt->request_size;
10303 9705 data_size = pt->data_size;
10304 9706 dataout_size = pt->dataout_size;
10305 9707 data_cookie = pt->data_cookie;
10306 9708 dataout_cookie = pt->dataout_cookie;
10307 9709
10308 9710 /*
10309 9711 * Store the passthrough message in memory location
10310 9712 * corresponding to our slot number
10311 9713 */
10312 9714 memp = mpt->m_req_frame + (mpt->m_req_frame_size * cmd->cmd_slot);
10313 9715 request_hdrp = (pMPI2RequestHeader_t)memp;
10314 9716 bzero(memp, mpt->m_req_frame_size);
10315 9717
10316 9718 for (i = 0; i < request_size; i++) {
10317 9719 bcopy(request + i, memp + i, 1);
10318 9720 }
10319 9721
10320 9722 if (data_size || dataout_size) {
10321 9723 pMpi2SGESimple64_t sgep;
10322 9724 uint32_t sge_flags;
10323 9725
10324 9726 sgep = (pMpi2SGESimple64_t)((uint8_t *)request_hdrp +
10325 9727 request_size);
10326 9728 if (dataout_size) {
10327 9729
10328 9730 sge_flags = dataout_size |
10329 9731 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10330 9732 MPI2_SGE_FLAGS_END_OF_BUFFER |
10331 9733 MPI2_SGE_FLAGS_HOST_TO_IOC |
10332 9734 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10333 9735 MPI2_SGE_FLAGS_SHIFT);
10334 9736 ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
10335 9737 ddi_put32(acc_hdl, &sgep->Address.Low,
10336 9738 (uint32_t)(dataout_cookie.dmac_laddress &
10337 9739 0xffffffffull));
10338 9740 ddi_put32(acc_hdl, &sgep->Address.High,
10339 9741 (uint32_t)(dataout_cookie.dmac_laddress
10340 9742 >> 32));
10341 9743 sgep++;
10342 9744 }
10343 9745 sge_flags = data_size;
10344 9746 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10345 9747 MPI2_SGE_FLAGS_LAST_ELEMENT |
10346 9748 MPI2_SGE_FLAGS_END_OF_BUFFER |
10347 9749 MPI2_SGE_FLAGS_END_OF_LIST |
10348 9750 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10349 9751 MPI2_SGE_FLAGS_SHIFT);
10350 9752 if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10351 9753 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
10352 9754 MPI2_SGE_FLAGS_SHIFT);
10353 9755 } else {
10354 9756 sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
10355 9757 MPI2_SGE_FLAGS_SHIFT);
10356 9758 }
10357 9759 ddi_put32(acc_hdl, &sgep->FlagsLength,
10358 9760 sge_flags);
10359 9761 ddi_put32(acc_hdl, &sgep->Address.Low,
10360 9762 (uint32_t)(data_cookie.dmac_laddress &
10361 9763 0xffffffffull));
10362 9764 ddi_put32(acc_hdl, &sgep->Address.High,
10363 9765 (uint32_t)(data_cookie.dmac_laddress >> 32));
10364 9766 }
10365 9767
10366 9768 function = request_hdrp->Function;
10367 9769 if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10368 9770 (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10369 9771 pMpi2SCSIIORequest_t scsi_io_req;
10370 9772
10371 9773 scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
10372 9774 /*
10373 9775 * Put SGE for data and data_out buffer at the end of
10374 9776 * scsi_io_request message header.(64 bytes in total)
10375 9777 * Following above SGEs, the residual space will be
10376 9778 * used by sense data.
10377 9779 */
10378 9780 ddi_put8(acc_hdl,
10379 9781 &scsi_io_req->SenseBufferLength,
10380 9782 (uint8_t)(request_size - 64));
10381 9783
10382 9784 sense_bufp = mpt->m_req_frame_dma_addr +
10383 9785 (mpt->m_req_frame_size * cmd->cmd_slot);
10384 9786 sense_bufp += 64;
10385 9787 ddi_put32(acc_hdl,
10386 9788 &scsi_io_req->SenseBufferLowAddress, sense_bufp);
10387 9789
10388 9790 /*
10389 9791 * Set SGLOffset0 value
10390 9792 */
10391 9793 ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
10392 9794 offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
10393 9795
10394 9796 /*
10395 9797 * Setup descriptor info. RAID passthrough must use the
10396 9798 * default request descriptor which is already set, so if this
10397 9799 * is a SCSI IO request, change the descriptor to SCSI IO.
10398 9800 */
10399 9801 if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
10400 9802 desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
10401 9803 request_desc_high = (ddi_get16(acc_hdl,
10402 9804 &scsi_io_req->DevHandle) << 16);
10403 9805 }
↓ open down ↓ |
143 lines elided |
↑ open up ↑ |
10404 9806 }
10405 9807
10406 9808 /*
10407 9809 * We must wait till the message has been completed before
10408 9810 * beginning the next message so we wait for this one to
10409 9811 * finish.
10410 9812 */
10411 9813 (void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
10412 9814 request_desc_low = (cmd->cmd_slot << 16) + desc_type;
10413 9815 cmd->cmd_rfm = NULL;
10414 - mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
10415 9816 MPTSAS_START_CMD(mpt, request_desc_low, request_desc_high);
10416 9817 if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
10417 9818 (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
10418 9819 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10419 9820 }
10420 9821 }
10421 9822
10422 9823
10423 9824
/*
 * Execute a user-initiated MPI pass-through request (ioctl path).
 *
 * Copies the caller's MPI request message in from user space, allocates
 * any data-in/data-out DMA buffers, obtains a command from the IOC
 * command pool, submits it (directly if a slot is free, otherwise via
 * the wait queue), blocks on m_passthru_cv until the command finishes,
 * then copies the reply, sense data (for SCSI IO functions), and read
 * data back out to user space.  Task-management requests are diverted
 * to mptsas_ioc_task_management() and never reach the pass-through
 * machinery.
 *
 * Entered and exited holding m_mutex (asserted below); the lock is
 * dropped around every ddi_copyin/ddi_copyout since those may fault.
 * All cleanup funnels through the "out" label, driven by pt_flags.
 *
 * Returns 0 on success or an errno (EFAULT/ENOMEM/EAGAIN/EIO/
 * ETIMEDOUT) on failure.
 */
static int
mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
    uint8_t *data, uint32_t request_size, uint32_t reply_size,
    uint32_t data_size, uint32_t direction, uint8_t *dataout,
    uint32_t dataout_size, short timeout, int mode)
{
	mptsas_pt_request_t		pt;
	mptsas_dma_alloc_state_t	data_dma_state;
	mptsas_dma_alloc_state_t	dataout_dma_state;
	caddr_t				memp;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	uint32_t			reply_len = 0, sense_len = 0;
	pMPI2RequestHeader_t		request_hdrp;
	pMPI2RequestHeader_t		request_msg;
	pMPI2DefaultReply_t		reply_msg;
	Mpi2SCSIIOReply_t		rep_msg;
	int				i, status = 0, pt_flags = 0, rv = 0;
	int				rvalue;
	uint8_t				function;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Default the reply to a zeroed local frame in case the command
	 * completes with a CONTEXT reply (no reply frame delivered).
	 */
	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
	request_msg = kmem_zalloc(request_size, KM_SLEEP);

	mutex_exit(&mpt->m_mutex);
	/*
	 * copy in the request buffer since it could be used by
	 * another thread when the pt request into waitq
	 */
	if (ddi_copyin(request, request_msg, request_size, mode)) {
		mutex_enter(&mpt->m_mutex);
		status = EFAULT;
		mptsas_log(mpt, CE_WARN, "failed to copy request data");
		goto out;
	}
	mutex_enter(&mpt->m_mutex);

	function = request_msg->Function;
	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		/*
		 * Task management is handled synchronously by its own
		 * path; it does not go through the pass-through slots.
		 */
		pMpi2SCSITaskManagementRequest_t	task;
		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
		mptsas_setup_bus_reset_delay(mpt);
		rv = mptsas_ioc_task_management(mpt, task->TaskType,
		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
		    mode);

		if (rv != TRUE) {
			status = EIO;
			mptsas_log(mpt, CE_WARN, "task management failed");
		}
		goto out;
	}

	/* Data-in buffer (also used as write source for WRITE direction). */
	if (data_size != 0) {
		data_dma_state.size = data_size;
		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATA_ALLOCATED;
		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			mutex_exit(&mpt->m_mutex);
			/*
			 * NOTE(review): byte-at-a-time ddi_copyin in a
			 * loop; a single bulk ddi_copyin of data_size
			 * bytes looks equivalent and cheaper — confirm
			 * before changing, behavior kept as-is here.
			 */
			for (i = 0; i < data_size; i++) {
				if (ddi_copyin(data + i, (uint8_t *)
				    data_dma_state.memp + i, 1, mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy read data");
					goto out;
				}
			}
			mutex_enter(&mpt->m_mutex);
		}
	}

	/* Separate data-out buffer (used by BOTH-direction requests). */
	if (dataout_size != 0) {
		dataout_dma_state.size = dataout_size;
		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
			status = ENOMEM;
			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
			    "resource");
			goto out;
		}
		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
		mutex_exit(&mpt->m_mutex);
		for (i = 0; i < dataout_size; i++) {
			if (ddi_copyin(dataout + i, (uint8_t *)
			    dataout_dma_state.memp + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				mptsas_log(mpt, CE_WARN, "failed to copy out"
				    " data");
				status = EFAULT;
				goto out;
			}
		}
		mutex_enter(&mpt->m_mutex);
	}

	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = EAGAIN;
		/*
		 * NOTE(review): message text looks copy-pasted from the
		 * event-ack path; this is the pass-through pool.
		 */
		mptsas_log(mpt, CE_NOTE, "event ack command pool is full");
		goto out;
	}
	pt_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());
	bzero((caddr_t)&pt, sizeof (pt));

	cmd->ioc_cmd_slot = (uint32_t)(rvalue);

	/*
	 * pt lives on this stack; mptsas_start_passthru reads it via
	 * pkt_ha_private, and this thread blocks below until the
	 * command is finished, keeping the pointer valid.
	 */
	pt.request = (uint8_t *)request_msg;
	pt.direction = direction;
	pt.request_size = request_size;
	pt.data_size = data_size;
	pt.dataout_size = dataout_size;
	pt.data_cookie = data_dma_state.cookie;
	pt.dataout_cookie = dataout_dma_state.cookie;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb[0];
	pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
	pkt->pkt_ha_private = (opaque_t)&pt;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = timeout;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_PASSTHRU;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_passthru(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until completion/timeout marks the command FINISHED. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_PREPARED) {
		/* Locate the request frame this command actually used. */
		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
		    cmd->cmd_slot);
		request_hdrp = (pMPI2RequestHeader_t)memp;
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = ETIMEDOUT;
		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
		/* Triggers an IOC restart in the cleanup path below. */
		pt_flags |= MPTSAS_CMD_TIMEOUT;
		goto out;
	}

	if (cmd->cmd_rfm) {
		/*
		 * cmd_rfm is zero means the command reply is a CONTEXT
		 * reply and no PCI Write to post the free reply SMFA
		 * because no reply message frame is used.
		 * cmd_rfm is non-zero means the reply is a ADDRESS
		 * reply and reply message frame is used.
		 */
		pt_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply_msg = (pMPI2DefaultReply_t)
		    (mpt->m_reply_frame + (cmd->cmd_rfm -
		    mpt->m_reply_frame_dma_addr));
	}

	mptsas_fma_check(mpt, cmd);
	if (pkt->pkt_reason == CMD_TRAN_ERR) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "passthru fma error");
		goto out;
	}
	if (pkt->pkt_reason == CMD_RESET) {
		status = EAGAIN;
		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
		goto out;
	}

	if (pkt->pkt_reason == CMD_INCOMPLETE) {
		status = EIO;
		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
		goto out;
	}

	mutex_exit(&mpt->m_mutex);
	if (cmd->cmd_flags & CFLAG_PREPARED) {
		function = request_hdrp->Function;
		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
			/*
			 * SCSI IO replies carry sense data after the
			 * fixed reply structure.
			 */
			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
			sense_len = reply_size - reply_len;
		} else {
			reply_len = reply_size;
			sense_len = 0;
		}

		for (i = 0; i < reply_len; i++) {
			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
			    mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "reply data");
				goto out;
			}
		}
		/*
		 * Sense data lives at offset 64 of the request frame
		 * (set up as the sense buffer by mptsas_start_passthru).
		 */
		for (i = 0; i < sense_len; i++) {
			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
			    reply + reply_len + i, 1, mode)) {
				mutex_enter(&mpt->m_mutex);
				status = EFAULT;
				mptsas_log(mpt, CE_WARN, "failed to copy out "
				    "sense data");
				goto out;
			}
		}
	}

	if (data_size) {
		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
			/* Return the data the device read back in. */
			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
			for (i = 0; i < data_size; i++) {
				if (ddi_copyout((uint8_t *)(
				    data_dma_state.memp + i), data + i, 1,
				    mode)) {
					mutex_enter(&mpt->m_mutex);
					status = EFAULT;
					mptsas_log(mpt, CE_WARN, "failed to "
					    "copy out the reply data");
					goto out;
				}
			}
		}
	}
	mutex_enter(&mpt->m_mutex);
out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * A command that made it into a slot is removed here; one that
	 * never left the pool is returned to the pool instead.
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
		mptsas_return_to_pool(mpt, cmd);
	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
		if (mptsas_check_dma_handle(data_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&data_dma_state);
	}
	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
		    DDI_SUCCESS) {
			ddi_fm_service_impact(mpt->m_dip,
			    DDI_SERVICE_UNAFFECTED);
			status = EFAULT;
		}
		mptsas_dma_free(&dataout_dma_state);
	}
	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
		/* A hung pass-through leaves the IOC suspect; reset it. */
		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
		}
	}
	if (request_msg)
		kmem_free(request_msg, request_size);

	return (status);
}
10729 10130
10730 10131 static int
10731 10132 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
10732 10133 {
10733 10134 /*
10734 10135 * If timeout is 0, set timeout to default of 60 seconds.
10735 10136 */
10736 10137 if (data->Timeout == 0) {
10737 10138 data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
10738 10139 }
10739 10140
10740 10141 if (((data->DataSize == 0) &&
10741 10142 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
10742 10143 ((data->DataSize != 0) &&
10743 10144 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
10744 10145 (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
10745 10146 ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
10746 10147 (data->DataOutSize != 0))))) {
10747 10148 if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
10748 10149 data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
10749 10150 } else {
10750 10151 data->DataOutSize = 0;
10751 10152 }
10752 10153 /*
10753 10154 * Send passthru request messages
10754 10155 */
10755 10156 return (mptsas_do_passthru(mpt,
10756 10157 (uint8_t *)((uintptr_t)data->PtrRequest),
10757 10158 (uint8_t *)((uintptr_t)data->PtrReply),
10758 10159 (uint8_t *)((uintptr_t)data->PtrData),
10759 10160 data->RequestSize, data->ReplySize,
10760 10161 data->DataSize, data->DataDirection,
10761 10162 (uint8_t *)((uintptr_t)data->PtrDataOut),
10762 10163 data->DataOutSize, data->Timeout, mode));
10763 10164 } else {
10764 10165 return (EINVAL);
10765 10166 }
10766 10167 }
10767 10168
10768 10169 static uint8_t
10769 10170 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
10770 10171 {
10771 10172 uint8_t index;
10772 10173
10773 10174 for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
10774 10175 if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
10775 10176 return (index);
10776 10177 }
10777 10178 }
10778 10179
10779 10180 return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
10780 10181 }
10781 10182
10782 10183 static void
10783 10184 mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
10784 10185 {
10785 10186 pMpi2DiagBufferPostRequest_t pDiag_post_msg;
10786 10187 pMpi2DiagReleaseRequest_t pDiag_release_msg;
10787 10188 struct scsi_pkt *pkt = cmd->cmd_pkt;
10788 10189 mptsas_diag_request_t *diag = pkt->pkt_ha_private;
10789 10190 uint32_t request_desc_low, i;
10790 10191
10791 10192 ASSERT(mutex_owned(&mpt->m_mutex));
10792 10193
10793 10194 /*
10794 10195 * Form the diag message depending on the post or release function.
10795 10196 */
10796 10197 if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
10797 10198 pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
10798 10199 (mpt->m_req_frame + (mpt->m_req_frame_size *
10799 10200 cmd->cmd_slot));
10800 10201 bzero(pDiag_post_msg, mpt->m_req_frame_size);
10801 10202 ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
10802 10203 diag->function);
10803 10204 ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
10804 10205 diag->pBuffer->buffer_type);
10805 10206 ddi_put8(mpt->m_acc_req_frame_hdl,
10806 10207 &pDiag_post_msg->ExtendedType,
10807 10208 diag->pBuffer->extended_type);
10808 10209 ddi_put32(mpt->m_acc_req_frame_hdl,
10809 10210 &pDiag_post_msg->BufferLength,
10810 10211 diag->pBuffer->buffer_data.size);
10811 10212 for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
10812 10213 i++) {
10813 10214 ddi_put32(mpt->m_acc_req_frame_hdl,
10814 10215 &pDiag_post_msg->ProductSpecific[i],
10815 10216 diag->pBuffer->product_specific[i]);
10816 10217 }
10817 10218 ddi_put32(mpt->m_acc_req_frame_hdl,
10818 10219 &pDiag_post_msg->BufferAddress.Low,
10819 10220 (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
10820 10221 & 0xffffffffull));
10821 10222 ddi_put32(mpt->m_acc_req_frame_hdl,
10822 10223 &pDiag_post_msg->BufferAddress.High,
10823 10224 (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
10824 10225 >> 32));
10825 10226 } else {
10826 10227 pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
10827 10228 (mpt->m_req_frame + (mpt->m_req_frame_size *
10828 10229 cmd->cmd_slot));
10829 10230 bzero(pDiag_release_msg, mpt->m_req_frame_size);
10830 10231 ddi_put8(mpt->m_acc_req_frame_hdl,
10831 10232 &pDiag_release_msg->Function, diag->function);
10832 10233 ddi_put8(mpt->m_acc_req_frame_hdl,
10833 10234 &pDiag_release_msg->BufferType,
10834 10235 diag->pBuffer->buffer_type);
↓ open down ↓ |
410 lines elided |
↑ open up ↑ |
10835 10236 }
10836 10237
10837 10238 /*
10838 10239 * Send the message
10839 10240 */
10840 10241 (void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
10841 10242 DDI_DMA_SYNC_FORDEV);
10842 10243 request_desc_low = (cmd->cmd_slot << 16) +
10843 10244 MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10844 10245 cmd->cmd_rfm = NULL;
10845 - mpt->m_active->m_slot[cmd->cmd_slot] = cmd;
10846 10246 MPTSAS_START_CMD(mpt, request_desc_low, 0);
10847 10247 if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
10848 10248 DDI_SUCCESS) ||
10849 10249 (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
10850 10250 DDI_SUCCESS)) {
10851 10251 ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10852 10252 }
10853 10253 }
10854 10254
/*
 * Post a firmware diagnostic buffer to the IOC with a
 * MPI2_FUNCTION_DIAG_BUFFER_POST request and wait for its completion.
 *
 * A command is taken from the IOC command pool, submitted (directly if
 * a slot is free, otherwise via the wait queue), and this thread blocks
 * on m_fw_diag_cv until the command finishes.  On a successful reply
 * the buffer is marked valid_data/owned_by_firmware.  Cleanup funnels
 * through the "out" label, driven by post_flags.
 *
 * Returns DDI_SUCCESS/DDI_FAILURE; *return_code carries the
 * MPTSAS_FW_DIAG_ERROR_* value reported back to the ioctl caller.
 * Called with m_mutex held (cv_wait below relies on it).
 */
static int
mptsas_post_fw_diag_buffer(mptsas_t *mpt,
    mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
{
	mptsas_diag_request_t		diag;
	int				status, slot_num, post_flags = 0;
	mptsas_cmd_t			*cmd = NULL;
	struct scsi_pkt			*pkt;
	pMpi2DiagBufferPostReply_t	reply;
	uint16_t			iocstatus;
	uint32_t			iocloginfo, transfer_length;

	/*
	 * If buffer is not enabled, just leave.
	 */
	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
	if (!pBuffer->enabled) {
		status = DDI_FAILURE;
		goto out;
	}

	/*
	 * Clear some flags initially.
	 */
	pBuffer->force_release = FALSE;
	pBuffer->valid_data = FALSE;
	pBuffer->owned_by_firmware = FALSE;

	/*
	 * Get a cmd buffer from the cmd buffer pool
	 */
	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
		goto out;
	}
	post_flags |= MPTSAS_REQUEST_POOL_CMD;

	bzero((caddr_t)cmd, sizeof (*cmd));
	bzero((caddr_t)pkt, scsi_pkt_size());

	cmd->ioc_cmd_slot = (uint32_t)(slot_num);

	/*
	 * diag lives on this stack; mptsas_start_diag reads it via
	 * pkt_ha_private while this thread blocks below.
	 */
	diag.pBuffer = pBuffer;
	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;

	/*
	 * Form a blank cmd/pkt to store the acknowledgement message
	 */
	pkt->pkt_ha_private = (opaque_t)&diag;
	pkt->pkt_flags = FLAG_HEAD;
	pkt->pkt_time = 60;
	cmd->cmd_pkt = pkt;
	cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;

	/*
	 * Save the command in a slot
	 */
	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
		/*
		 * Once passthru command get slot, set cmd_flags
		 * CFLAG_PREPARED.
		 */
		cmd->cmd_flags |= CFLAG_PREPARED;
		mptsas_start_diag(mpt, cmd);
	} else {
		mptsas_waitq_add(mpt, cmd);
	}

	/* Block until completion/timeout marks the command FINISHED. */
	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
	}

	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
		status = DDI_FAILURE;
		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
		goto out;
	}

	/*
	 * cmd_rfm points to the reply message if a reply was given. Check the
	 * IOCStatus to make sure everything went OK with the FW diag request
	 * and set buffer flags.
	 */
	if (cmd->cmd_rfm) {
		post_flags |= MPTSAS_ADDRESS_REPLY;
		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
		    (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));

		/*
		 * Get the reply message data
		 */
		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCStatus);
		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->IOCLogInfo);
		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
		    &reply->TransferLength);

		/*
		 * If post failed quit.
		 */
		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
			status = DDI_FAILURE;
			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
			    iocloginfo, transfer_length));
			goto out;
		}

		/*
		 * Post was successful.
		 */
		pBuffer->valid_data = TRUE;
		pBuffer->owned_by_firmware = TRUE;
		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
		status = DDI_SUCCESS;
	}

out:
	/*
	 * Put the reply frame back on the free queue, increment the free
	 * index, and write the new index to the free index register. But only
	 * if this reply is an ADDRESS reply.
	 */
	if (post_flags & MPTSAS_ADDRESS_REPLY) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
		    cmd->cmd_rfm);
		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
			mpt->m_free_index = 0;
		}
		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
		    mpt->m_free_index);
	}
	/*
	 * A command that made it into a slot is removed here; one that
	 * never left the pool is returned to the pool instead.
	 */
	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
		mptsas_remove_cmd(mpt, cmd);
		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
	}
	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
		mptsas_return_to_pool(mpt, cmd);
	}

	return (status);
}
11004 10404
11005 10405 static int
11006 10406 mptsas_release_fw_diag_buffer(mptsas_t *mpt,
11007 10407 mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
11008 10408 uint32_t diag_type)
11009 10409 {
11010 10410 mptsas_diag_request_t diag;
11011 10411 int status, slot_num, rel_flags = 0;
11012 10412 mptsas_cmd_t *cmd = NULL;
11013 10413 struct scsi_pkt *pkt;
11014 10414 pMpi2DiagReleaseReply_t reply;
11015 10415 uint16_t iocstatus;
11016 10416 uint32_t iocloginfo;
11017 10417
11018 10418 /*
11019 10419 * If buffer is not enabled, just leave.
11020 10420 */
11021 10421 *return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
11022 10422 if (!pBuffer->enabled) {
11023 10423 mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
11024 10424 "by the IOC");
11025 10425 status = DDI_FAILURE;
11026 10426 goto out;
11027 10427 }
11028 10428
11029 10429 /*
11030 10430 * Clear some flags initially.
11031 10431 */
11032 10432 pBuffer->force_release = FALSE;
11033 10433 pBuffer->valid_data = FALSE;
11034 10434 pBuffer->owned_by_firmware = FALSE;
11035 10435
11036 10436 /*
11037 10437 * Get a cmd buffer from the cmd buffer pool
11038 10438 */
11039 10439 if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
11040 10440 status = DDI_FAILURE;
11041 10441 mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
11042 10442 "Diag");
11043 10443 goto out;
11044 10444 }
11045 10445 rel_flags |= MPTSAS_REQUEST_POOL_CMD;
11046 10446
11047 10447 bzero((caddr_t)cmd, sizeof (*cmd));
11048 10448 bzero((caddr_t)pkt, scsi_pkt_size());
11049 10449
11050 10450 cmd->ioc_cmd_slot = (uint32_t)(slot_num);
11051 10451
11052 10452 diag.pBuffer = pBuffer;
11053 10453 diag.function = MPI2_FUNCTION_DIAG_RELEASE;
11054 10454
11055 10455 /*
11056 10456 * Form a blank cmd/pkt to store the acknowledgement message
11057 10457 */
11058 10458 pkt->pkt_ha_private = (opaque_t)&diag;
11059 10459 pkt->pkt_flags = FLAG_HEAD;
11060 10460 pkt->pkt_time = 60;
11061 10461 cmd->cmd_pkt = pkt;
11062 10462 cmd->cmd_flags = CFLAG_CMDIOC | CFLAG_FW_DIAG;
11063 10463
11064 10464 /*
11065 10465 * Save the command in a slot
11066 10466 */
11067 10467 if (mptsas_save_cmd(mpt, cmd) == TRUE) {
11068 10468 /*
11069 10469 * Once passthru command get slot, set cmd_flags
11070 10470 * CFLAG_PREPARED.
11071 10471 */
11072 10472 cmd->cmd_flags |= CFLAG_PREPARED;
11073 10473 mptsas_start_diag(mpt, cmd);
11074 10474 } else {
11075 10475 mptsas_waitq_add(mpt, cmd);
11076 10476 }
11077 10477
11078 10478 while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
11079 10479 cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
11080 10480 }
11081 10481
11082 10482 if (cmd->cmd_flags & CFLAG_TIMEOUT) {
11083 10483 status = DDI_FAILURE;
11084 10484 mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
11085 10485 goto out;
11086 10486 }
11087 10487
11088 10488 /*
11089 10489 * cmd_rfm points to the reply message if a reply was given. Check the
11090 10490 * IOCStatus to make sure everything went OK with the FW diag request
11091 10491 * and set buffer flags.
11092 10492 */
11093 10493 if (cmd->cmd_rfm) {
11094 10494 rel_flags |= MPTSAS_ADDRESS_REPLY;
11095 10495 (void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
11096 10496 DDI_DMA_SYNC_FORCPU);
11097 10497 reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
11098 10498 (cmd->cmd_rfm - mpt->m_reply_frame_dma_addr));
11099 10499
11100 10500 /*
11101 10501 * Get the reply message data
11102 10502 */
11103 10503 iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
11104 10504 &reply->IOCStatus);
11105 10505 iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
11106 10506 &reply->IOCLogInfo);
11107 10507
11108 10508 /*
11109 10509 * If release failed quit.
11110 10510 */
11111 10511 if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
11112 10512 pBuffer->owned_by_firmware) {
11113 10513 status = DDI_FAILURE;
11114 10514 NDBG13(("release FW Diag Buffer failed: "
11115 10515 "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
11116 10516 iocloginfo));
11117 10517 goto out;
11118 10518 }
11119 10519
11120 10520 /*
11121 10521 * Release was successful.
11122 10522 */
11123 10523 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11124 10524 status = DDI_SUCCESS;
11125 10525
11126 10526 /*
11127 10527 * If this was for an UNREGISTER diag type command, clear the
11128 10528 * unique ID.
11129 10529 */
11130 10530 if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
11131 10531 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
11132 10532 }
11133 10533 }
11134 10534
11135 10535 out:
11136 10536 /*
11137 10537 * Put the reply frame back on the free queue, increment the free
11138 10538 * index, and write the new index to the free index register. But only
11139 10539 * if this reply is an ADDRESS reply.
11140 10540 */
11141 10541 if (rel_flags & MPTSAS_ADDRESS_REPLY) {
11142 10542 ddi_put32(mpt->m_acc_free_queue_hdl,
11143 10543 &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
11144 10544 cmd->cmd_rfm);
11145 10545 (void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11146 10546 DDI_DMA_SYNC_FORDEV);
11147 10547 if (++mpt->m_free_index == mpt->m_free_queue_depth) {
11148 10548 mpt->m_free_index = 0;
11149 10549 }
11150 10550 ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
11151 10551 mpt->m_free_index);
11152 10552 }
11153 10553 if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
11154 10554 mptsas_remove_cmd(mpt, cmd);
11155 10555 rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
11156 10556 }
11157 10557 if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
11158 10558 mptsas_return_to_pool(mpt, cmd);
11159 10559 }
11160 10560
11161 10561 return (status);
11162 10562 }
11163 10563
/*
 * Register a firmware diagnostic buffer (MPTSAS_FW_DIAG_TYPE_REGISTER).
 *
 * Validates the requested buffer type and unique ID, allocates one
 * contiguous DMA buffer of RequestedBufferSize bytes, records the
 * caller's product-specific words, and posts the buffer to the firmware
 * via mptsas_post_fw_diag_buffer().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; on failure *return_code is set to
 * an MPTSAS_FW_DIAG_ERROR_* value and any DMA buffer allocated here is
 * freed again.  Caller must hold mpt->m_mutex.
 */
static int
mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				extended_type, buffer_type, i;
	uint32_t			buffer_size;
	uint32_t			unique_id;
	int				status;

	ASSERT(mutex_owned(&mpt->m_mutex));

	extended_type = diag_register->ExtendedType;
	buffer_type = diag_register->BufferType;
	buffer_size = diag_register->RequestedBufferSize;
	unique_id = diag_register->UniqueId;

	/*
	 * Check for valid buffer type
	 */
	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
		return (DDI_FAILURE);
	}

	/*
	 * Get the current buffer and look up the unique ID.  The unique ID
	 * should not be found.  If it is, the ID is already in use.
	 */
	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * The buffer's unique ID should not be registered yet, and the given
	 * unique ID cannot be 0.
	 */
	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
		return (DDI_FAILURE);
	}

	/*
	 * If this buffer is already posted as immediate, just change owner.
	 * (The unique_id test here is always true after the check above; it
	 * is kept as a belt-and-braces guard.)
	 */
	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
		pBuffer->immediate = FALSE;
		pBuffer->unique_id = unique_id;
		return (DDI_SUCCESS);
	}

	/*
	 * Post a new buffer after checking if it's enabled.  The DMA buffer
	 * that is allocated will be contiguous (sgl_len = 1).
	 */
	if (!pBuffer->enabled) {
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}
	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
	pBuffer->buffer_data.size = buffer_size;
	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
		    "diag buffer: size = %d bytes", buffer_size);
		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
		return (DDI_FAILURE);
	}

	/*
	 * Copy the given info to the diag buffer and post the buffer.
	 */
	pBuffer->buffer_type = buffer_type;
	pBuffer->immediate = FALSE;
	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
		/* Trace buffers carry the caller's product-specific words. */
		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
		    i++) {
			pBuffer->product_specific[i] =
			    diag_register->ProductSpecific[i];
		}
	}
	pBuffer->extended_type = extended_type;
	pBuffer->unique_id = unique_id;
	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);

	/*
	 * FMA check of the DMA handle the firmware was just given.
	 */
	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
	    DDI_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
		    "mptsas_diag_register.");
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		status = DDI_FAILURE;
	}

	/*
	 * In case there was a failure, free the DMA buffer.
	 */
	if (status == DDI_FAILURE) {
		mptsas_dma_free(&pBuffer->buffer_data);
	}

	return (status);
}
11270 10670
11271 10671 static int
11272 10672 mptsas_diag_unregister(mptsas_t *mpt,
11273 10673 mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
11274 10674 {
11275 10675 mptsas_fw_diagnostic_buffer_t *pBuffer;
11276 10676 uint8_t i;
11277 10677 uint32_t unique_id;
11278 10678 int status;
11279 10679
11280 10680 ASSERT(mutex_owned(&mpt->m_mutex));
11281 10681
11282 10682 unique_id = diag_unregister->UniqueId;
11283 10683
11284 10684 /*
11285 10685 * Get the current buffer and look up the unique ID. The unique ID
11286 10686 * should be there.
11287 10687 */
11288 10688 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11289 10689 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11290 10690 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11291 10691 return (DDI_FAILURE);
11292 10692 }
11293 10693
11294 10694 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11295 10695
11296 10696 /*
11297 10697 * Try to release the buffer from FW before freeing it. If release
11298 10698 * fails, don't free the DMA buffer in case FW tries to access it
11299 10699 * later. If buffer is not owned by firmware, can't release it.
11300 10700 */
11301 10701 if (!pBuffer->owned_by_firmware) {
11302 10702 status = DDI_SUCCESS;
11303 10703 } else {
11304 10704 status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
11305 10705 return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
11306 10706 }
11307 10707
11308 10708 /*
11309 10709 * At this point, return the current status no matter what happens with
11310 10710 * the DMA buffer.
11311 10711 */
11312 10712 pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
11313 10713 if (status == DDI_SUCCESS) {
11314 10714 if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
11315 10715 DDI_SUCCESS) {
11316 10716 mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
11317 10717 "in mptsas_diag_unregister.");
11318 10718 ddi_fm_service_impact(mpt->m_dip,
11319 10719 DDI_SERVICE_UNAFFECTED);
11320 10720 }
11321 10721 mptsas_dma_free(&pBuffer->buffer_data);
11322 10722 }
11323 10723
11324 10724 return (status);
11325 10725 }
11326 10726
/*
 * Query one firmware diagnostic buffer (MPTSAS_FW_DIAG_TYPE_QUERY).
 *
 * The buffer is selected by UniqueId when one is supplied; an invalid
 * (zero) UniqueId means "query by BufferType" instead.  On success the
 * caller's diag_query structure is filled in with the buffer's type,
 * size, unique ID and application flags.  Returns DDI_SUCCESS or
 * DDI_FAILURE with *return_code set.  Caller must hold mpt->m_mutex.
 */
static int
mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
    uint32_t *return_code)
{
	mptsas_fw_diagnostic_buffer_t	*pBuffer;
	uint8_t				i;
	uint32_t			unique_id;

	ASSERT(mutex_owned(&mpt->m_mutex));

	unique_id = diag_query->UniqueId;

	/*
	 * If ID is valid, query on ID.
	 * If ID is invalid, query on buffer type.
	 */
	if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
		i = diag_query->BufferType;
		if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
			return (DDI_FAILURE);
		}
	} else {
		i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
		if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
			return (DDI_FAILURE);
		}
	}

	/*
	 * Fill query structure with the diag buffer info.
	 */
	pBuffer = &mpt->m_fw_diag_buffer_list[i];
	diag_query->BufferType = pBuffer->buffer_type;
	diag_query->ExtendedType = pBuffer->extended_type;
	if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
		/* Trace buffers also report the product-specific words. */
		for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
		    i++) {
			diag_query->ProductSpecific[i] =
			    pBuffer->product_specific[i];
		}
	}
	diag_query->TotalBufferSize = pBuffer->buffer_data.size;
	diag_query->DriverAddedBufferSize = 0;
	diag_query->UniqueId = pBuffer->unique_id;
	diag_query->ApplicationFlags = 0;
	diag_query->DiagnosticFlags = 0;

	/*
	 * Set/Clear application flags.  ApplicationFlags was zeroed just
	 * above, so the "clear" branches below are effectively no-ops; they
	 * are kept for symmetry with the "set" branches.
	 */
	if (pBuffer->immediate) {
		diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
	} else {
		diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
	}
	if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
		diag_query->ApplicationFlags |=
		    MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
	} else {
		diag_query->ApplicationFlags &=
		    ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
	}
	if (pBuffer->owned_by_firmware) {
		diag_query->ApplicationFlags |=
		    MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
	} else {
		diag_query->ApplicationFlags &=
		    ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
	}

	return (DDI_SUCCESS);
}
11401 10801
11402 10802 static int
11403 10803 mptsas_diag_read_buffer(mptsas_t *mpt,
11404 10804 mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
11405 10805 uint32_t *return_code, int ioctl_mode)
11406 10806 {
11407 10807 mptsas_fw_diagnostic_buffer_t *pBuffer;
11408 10808 uint8_t i, *pData;
11409 10809 uint32_t unique_id, byte;
11410 10810 int status;
11411 10811
11412 10812 ASSERT(mutex_owned(&mpt->m_mutex));
11413 10813
11414 10814 unique_id = diag_read_buffer->UniqueId;
11415 10815
11416 10816 /*
11417 10817 * Get the current buffer and look up the unique ID. The unique ID
11418 10818 * should be there.
11419 10819 */
11420 10820 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11421 10821 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11422 10822 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11423 10823 return (DDI_FAILURE);
11424 10824 }
11425 10825
11426 10826 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11427 10827
11428 10828 /*
11429 10829 * Make sure requested read is within limits
11430 10830 */
11431 10831 if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
11432 10832 pBuffer->buffer_data.size) {
11433 10833 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11434 10834 return (DDI_FAILURE);
11435 10835 }
11436 10836
11437 10837 /*
11438 10838 * Copy the requested data from DMA to the diag_read_buffer. The DMA
11439 10839 * buffer that was allocated is one contiguous buffer.
11440 10840 */
11441 10841 pData = (uint8_t *)(pBuffer->buffer_data.memp +
11442 10842 diag_read_buffer->StartingOffset);
11443 10843 (void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
11444 10844 DDI_DMA_SYNC_FORCPU);
11445 10845 for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
11446 10846 if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
11447 10847 != 0) {
11448 10848 return (DDI_FAILURE);
11449 10849 }
11450 10850 }
11451 10851 diag_read_buffer->Status = 0;
11452 10852
11453 10853 /*
11454 10854 * Set or clear the Force Release flag.
11455 10855 */
11456 10856 if (pBuffer->force_release) {
11457 10857 diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11458 10858 } else {
11459 10859 diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
11460 10860 }
11461 10861
11462 10862 /*
11463 10863 * If buffer is to be reregistered, make sure it's not already owned by
11464 10864 * firmware first.
11465 10865 */
11466 10866 status = DDI_SUCCESS;
11467 10867 if (!pBuffer->owned_by_firmware) {
11468 10868 if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
11469 10869 status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
11470 10870 return_code);
11471 10871 }
11472 10872 }
11473 10873
11474 10874 return (status);
11475 10875 }
11476 10876
11477 10877 static int
11478 10878 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
11479 10879 uint32_t *return_code)
11480 10880 {
11481 10881 mptsas_fw_diagnostic_buffer_t *pBuffer;
11482 10882 uint8_t i;
11483 10883 uint32_t unique_id;
11484 10884 int status;
11485 10885
11486 10886 ASSERT(mutex_owned(&mpt->m_mutex));
11487 10887
11488 10888 unique_id = diag_release->UniqueId;
11489 10889
11490 10890 /*
11491 10891 * Get the current buffer and look up the unique ID. The unique ID
11492 10892 * should be there.
11493 10893 */
11494 10894 i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11495 10895 if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11496 10896 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11497 10897 return (DDI_FAILURE);
11498 10898 }
11499 10899
11500 10900 pBuffer = &mpt->m_fw_diag_buffer_list[i];
11501 10901
11502 10902 /*
11503 10903 * If buffer is not owned by firmware, it's already been released.
11504 10904 */
11505 10905 if (!pBuffer->owned_by_firmware) {
11506 10906 *return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
11507 10907 return (DDI_FAILURE);
11508 10908 }
11509 10909
11510 10910 /*
11511 10911 * Release the buffer.
11512 10912 */
11513 10913 status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
11514 10914 MPTSAS_FW_DIAG_TYPE_RELEASE);
11515 10915 return (status);
11516 10916 }
11517 10917
11518 10918 static int
11519 10919 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
11520 10920 uint32_t length, uint32_t *return_code, int ioctl_mode)
11521 10921 {
11522 10922 mptsas_fw_diag_register_t diag_register;
11523 10923 mptsas_fw_diag_unregister_t diag_unregister;
11524 10924 mptsas_fw_diag_query_t diag_query;
11525 10925 mptsas_diag_read_buffer_t diag_read_buffer;
11526 10926 mptsas_fw_diag_release_t diag_release;
11527 10927 int status = DDI_SUCCESS;
11528 10928 uint32_t original_return_code, read_buf_len;
11529 10929
11530 10930 ASSERT(mutex_owned(&mpt->m_mutex));
11531 10931
11532 10932 original_return_code = *return_code;
11533 10933 *return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11534 10934
11535 10935 switch (action) {
11536 10936 case MPTSAS_FW_DIAG_TYPE_REGISTER:
11537 10937 if (!length) {
11538 10938 *return_code =
11539 10939 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11540 10940 status = DDI_FAILURE;
11541 10941 break;
11542 10942 }
11543 10943 if (ddi_copyin(diag_action, &diag_register,
11544 10944 sizeof (diag_register), ioctl_mode) != 0) {
11545 10945 return (DDI_FAILURE);
11546 10946 }
11547 10947 status = mptsas_diag_register(mpt, &diag_register,
11548 10948 return_code);
11549 10949 break;
11550 10950
11551 10951 case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
11552 10952 if (length < sizeof (diag_unregister)) {
11553 10953 *return_code =
11554 10954 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11555 10955 status = DDI_FAILURE;
11556 10956 break;
11557 10957 }
11558 10958 if (ddi_copyin(diag_action, &diag_unregister,
11559 10959 sizeof (diag_unregister), ioctl_mode) != 0) {
11560 10960 return (DDI_FAILURE);
11561 10961 }
11562 10962 status = mptsas_diag_unregister(mpt, &diag_unregister,
11563 10963 return_code);
11564 10964 break;
11565 10965
11566 10966 case MPTSAS_FW_DIAG_TYPE_QUERY:
11567 10967 if (length < sizeof (diag_query)) {
11568 10968 *return_code =
11569 10969 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11570 10970 status = DDI_FAILURE;
11571 10971 break;
11572 10972 }
11573 10973 if (ddi_copyin(diag_action, &diag_query,
11574 10974 sizeof (diag_query), ioctl_mode) != 0) {
11575 10975 return (DDI_FAILURE);
11576 10976 }
11577 10977 status = mptsas_diag_query(mpt, &diag_query,
11578 10978 return_code);
11579 10979 if (status == DDI_SUCCESS) {
11580 10980 if (ddi_copyout(&diag_query, diag_action,
11581 10981 sizeof (diag_query), ioctl_mode) != 0) {
11582 10982 return (DDI_FAILURE);
11583 10983 }
11584 10984 }
11585 10985 break;
11586 10986
11587 10987 case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
11588 10988 if (ddi_copyin(diag_action, &diag_read_buffer,
11589 10989 sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
11590 10990 return (DDI_FAILURE);
11591 10991 }
11592 10992 read_buf_len = sizeof (diag_read_buffer) -
11593 10993 sizeof (diag_read_buffer.DataBuffer) +
11594 10994 diag_read_buffer.BytesToRead;
11595 10995 if (length < read_buf_len) {
11596 10996 *return_code =
11597 10997 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11598 10998 status = DDI_FAILURE;
11599 10999 break;
11600 11000 }
11601 11001 status = mptsas_diag_read_buffer(mpt,
11602 11002 &diag_read_buffer, diag_action +
11603 11003 sizeof (diag_read_buffer) - 4, return_code,
11604 11004 ioctl_mode);
11605 11005 if (status == DDI_SUCCESS) {
11606 11006 if (ddi_copyout(&diag_read_buffer, diag_action,
11607 11007 sizeof (diag_read_buffer) - 4, ioctl_mode)
11608 11008 != 0) {
11609 11009 return (DDI_FAILURE);
11610 11010 }
11611 11011 }
11612 11012 break;
11613 11013
11614 11014 case MPTSAS_FW_DIAG_TYPE_RELEASE:
11615 11015 if (length < sizeof (diag_release)) {
11616 11016 *return_code =
11617 11017 MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11618 11018 status = DDI_FAILURE;
11619 11019 break;
11620 11020 }
11621 11021 if (ddi_copyin(diag_action, &diag_release,
11622 11022 sizeof (diag_release), ioctl_mode) != 0) {
11623 11023 return (DDI_FAILURE);
11624 11024 }
11625 11025 status = mptsas_diag_release(mpt, &diag_release,
11626 11026 return_code);
11627 11027 break;
11628 11028
11629 11029 default:
11630 11030 *return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11631 11031 status = DDI_FAILURE;
11632 11032 break;
11633 11033 }
11634 11034
11635 11035 if ((status == DDI_FAILURE) &&
11636 11036 (original_return_code == MPTSAS_FW_DIAG_NEW) &&
11637 11037 (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
11638 11038 status = DDI_SUCCESS;
11639 11039 }
11640 11040
11641 11041 return (status);
11642 11042 }
11643 11043
11644 11044 static int
11645 11045 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
11646 11046 {
11647 11047 int status;
11648 11048 mptsas_diag_action_t driver_data;
11649 11049
11650 11050 ASSERT(mutex_owned(&mpt->m_mutex));
11651 11051
11652 11052 /*
11653 11053 * Copy the user data to a driver data buffer.
11654 11054 */
11655 11055 if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
11656 11056 mode) == 0) {
11657 11057 /*
11658 11058 * Send diag action request if Action is valid
11659 11059 */
11660 11060 if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
11661 11061 driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
11662 11062 driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
11663 11063 driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
11664 11064 driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
11665 11065 status = mptsas_do_diag_action(mpt, driver_data.Action,
11666 11066 (void *)(uintptr_t)driver_data.PtrDiagAction,
11667 11067 driver_data.Length, &driver_data.ReturnCode,
11668 11068 mode);
11669 11069 if (status == DDI_SUCCESS) {
11670 11070 if (ddi_copyout(&driver_data.ReturnCode,
11671 11071 &user_data->ReturnCode,
11672 11072 sizeof (user_data->ReturnCode), mode)
11673 11073 != 0) {
11674 11074 status = EFAULT;
11675 11075 } else {
11676 11076 status = 0;
11677 11077 }
11678 11078 } else {
11679 11079 status = EIO;
11680 11080 }
11681 11081 } else {
11682 11082 status = EINVAL;
11683 11083 }
11684 11084 } else {
11685 11085 status = EFAULT;
11686 11086 }
11687 11087
11688 11088 return (status);
11689 11089 }
11690 11090
11691 11091 /*
11692 11092 * This routine handles the "event query" ioctl.
11693 11093 */
11694 11094 static int
11695 11095 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
11696 11096 int *rval)
11697 11097 {
11698 11098 int status;
11699 11099 mptsas_event_query_t driverdata;
11700 11100 uint8_t i;
11701 11101
11702 11102 driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
11703 11103
11704 11104 mutex_enter(&mpt->m_mutex);
11705 11105 for (i = 0; i < 4; i++) {
11706 11106 driverdata.Types[i] = mpt->m_event_mask[i];
11707 11107 }
11708 11108 mutex_exit(&mpt->m_mutex);
11709 11109
11710 11110 if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
11711 11111 status = EFAULT;
11712 11112 } else {
11713 11113 *rval = MPTIOCTL_STATUS_GOOD;
11714 11114 status = 0;
11715 11115 }
11716 11116
11717 11117 return (status);
11718 11118 }
11719 11119
11720 11120 /*
11721 11121 * This routine handles the "event enable" ioctl.
11722 11122 */
11723 11123 static int
11724 11124 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
11725 11125 int *rval)
11726 11126 {
11727 11127 int status;
11728 11128 mptsas_event_enable_t driverdata;
11729 11129 uint8_t i;
11730 11130
11731 11131 if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
11732 11132 mutex_enter(&mpt->m_mutex);
11733 11133 for (i = 0; i < 4; i++) {
11734 11134 mpt->m_event_mask[i] = driverdata.Types[i];
11735 11135 }
11736 11136 mutex_exit(&mpt->m_mutex);
11737 11137
11738 11138 *rval = MPTIOCTL_STATUS_GOOD;
11739 11139 status = 0;
11740 11140 } else {
11741 11141 status = EFAULT;
11742 11142 }
11743 11143 return (status);
11744 11144 }
11745 11145
/*
 * This routine handles the "event report" ioctl.
 *
 * Only the Size field of the user structure is copied in: it states how
 * many bytes the caller's Events buffer can hold.  When that is at
 * least sizeof (mpt->m_events), the whole cached event queue is copied
 * out; if the caller claimed more room than needed, the actual size is
 * written back into data->Size.  A smaller buffer yields
 * MPTIOCTL_STATUS_LEN_TOO_SHORT without copying any events.
 *
 * Returns 0 on success or short-length (result in *rval), EFAULT on
 * any copyin/copyout failure.
 */
static int
mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
    int *rval)
{
	int			status;
	mptsas_event_report_t	driverdata;

	mutex_enter(&mpt->m_mutex);

	/* Fetch just the caller-supplied buffer size. */
	if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
	    mode) == 0) {
		if (driverdata.Size >= sizeof (mpt->m_events)) {
			if (ddi_copyout(mpt->m_events, data->Events,
			    sizeof (mpt->m_events), mode) != 0) {
				status = EFAULT;
			} else {
				/*
				 * Report the true size when the caller
				 * offered more space than we used.
				 */
				if (driverdata.Size > sizeof (mpt->m_events)) {
					driverdata.Size =
					    sizeof (mpt->m_events);
					if (ddi_copyout(&driverdata.Size,
					    &data->Size,
					    sizeof (driverdata.Size),
					    mode) != 0) {
						status = EFAULT;
					} else {
						*rval = MPTIOCTL_STATUS_GOOD;
						status = 0;
					}
				} else {
					*rval = MPTIOCTL_STATUS_GOOD;
					status = 0;
				}
			}
		} else {
			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
			status = 0;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
11793 11193
11794 11194 static void
11795 11195 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11796 11196 {
11797 11197 int *reg_data;
11798 11198 uint_t reglen;
11799 11199
11800 11200 /*
11801 11201 * Lookup the 'reg' property and extract the other data
11802 11202 */
11803 11203 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11804 11204 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11805 11205 DDI_PROP_SUCCESS) {
11806 11206 /*
11807 11207 * Extract the PCI data from the 'reg' property first DWORD.
11808 11208 * The entry looks like the following:
11809 11209 * First DWORD:
11810 11210 * Bits 0 - 7 8-bit Register number
11811 11211 * Bits 8 - 10 3-bit Function number
11812 11212 * Bits 11 - 15 5-bit Device number
11813 11213 * Bits 16 - 23 8-bit Bus number
11814 11214 * Bits 24 - 25 2-bit Address Space type identifier
11815 11215 *
11816 11216 */
11817 11217 adapter_data->PciInformation.u.bits.BusNumber =
11818 11218 (reg_data[0] & 0x00FF0000) >> 16;
11819 11219 adapter_data->PciInformation.u.bits.DeviceNumber =
11820 11220 (reg_data[0] & 0x0000F800) >> 11;
11821 11221 adapter_data->PciInformation.u.bits.FunctionNumber =
11822 11222 (reg_data[0] & 0x00000700) >> 8;
11823 11223 ddi_prop_free((void *)reg_data);
11824 11224 } else {
11825 11225 /*
11826 11226 * If we can't determine the PCI data then we fill in FF's for
11827 11227 * the data to indicate this.
11828 11228 */
11829 11229 adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
11830 11230 adapter_data->MpiPortNumber = 0xFFFFFFFF;
11831 11231 adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
11832 11232 }
11833 11233
11834 11234 /*
11835 11235 * Saved in the mpt->m_fwversion
11836 11236 */
11837 11237 adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
11838 11238 }
11839 11239
11840 11240 static void
11841 11241 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
11842 11242 {
11843 11243 char *driver_verstr = MPTSAS_MOD_STRING;
11844 11244
11845 11245 mptsas_lookup_pci_data(mpt, adapter_data);
11846 11246 adapter_data->AdapterType = MPTIOCTL_ADAPTER_TYPE_SAS2;
11847 11247 adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
11848 11248 adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
11849 11249 adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
11850 11250 adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
11851 11251 (void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
11852 11252 adapter_data->BiosVersion = 0;
11853 11253 (void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
11854 11254 }
11855 11255
11856 11256 static void
11857 11257 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
11858 11258 {
11859 11259 int *reg_data, i;
11860 11260 uint_t reglen;
11861 11261
11862 11262 /*
11863 11263 * Lookup the 'reg' property and extract the other data
11864 11264 */
11865 11265 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
11866 11266 DDI_PROP_DONTPASS, "reg", ®_data, ®len) ==
11867 11267 DDI_PROP_SUCCESS) {
11868 11268 /*
11869 11269 * Extract the PCI data from the 'reg' property first DWORD.
11870 11270 * The entry looks like the following:
11871 11271 * First DWORD:
11872 11272 * Bits 8 - 10 3-bit Function number
11873 11273 * Bits 11 - 15 5-bit Device number
11874 11274 * Bits 16 - 23 8-bit Bus number
11875 11275 */
11876 11276 pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
11877 11277 pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
11878 11278 pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
11879 11279 ddi_prop_free((void *)reg_data);
11880 11280 } else {
11881 11281 /*
11882 11282 * If we can't determine the PCI info then we fill in FF's for
11883 11283 * the data to indicate this.
11884 11284 */
11885 11285 pci_info->BusNumber = 0xFFFFFFFF;
11886 11286 pci_info->DeviceNumber = 0xFF;
11887 11287 pci_info->FunctionNumber = 0xFF;
11888 11288 }
11889 11289
11890 11290 /*
11891 11291 * Now get the interrupt vector and the pci header. The vector can
11892 11292 * only be 0 right now. The header is the first 256 bytes of config
11893 11293 * space.
11894 11294 */
11895 11295 pci_info->InterruptVector = 0;
11896 11296 for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
11897 11297 pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
11898 11298 i);
11899 11299 }
11900 11300 }
11901 11301
/*
 * Handle the register access ioctl: read or write one 32-bit chip
 * register on behalf of a diagnostic application.
 *
 * RegOffset is applied as an index in pointer arithmetic on a
 * (uint32_t *) view of the register space, i.e. it selects a 32-bit
 * register slot rather than a byte offset.  IO-space access is not
 * supported; only REG_MEM_READ and REG_MEM_WRITE are honored.
 *
 * Returns 0 on success, EINVAL for unsupported commands, EFAULT on
 * copyin/copyout failure.
 */
static int
mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
{
	int			status = 0;
	mptsas_reg_access_t	driverdata;

	mutex_enter(&mpt->m_mutex);
	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
		switch (driverdata.Command) {
			/*
			 * IO access is not supported.
			 */
			case REG_IO_READ:
			case REG_IO_WRITE:
				mptsas_log(mpt, CE_WARN, "IO access is not "
				    "supported.  Use memory access.");
				status = EINVAL;
				break;

			case REG_MEM_READ:
				/* 32-bit read at the requested register. */
				driverdata.RegData = ddi_get32(mpt->m_datap,
				    (uint32_t *)(void *)mpt->m_reg +
				    driverdata.RegOffset);
				if (ddi_copyout(&driverdata.RegData,
				    &data->RegData,
				    sizeof (driverdata.RegData), mode) != 0) {
					mptsas_log(mpt, CE_WARN, "Register "
					    "Read Failed");
					status = EFAULT;
				}
				break;

			case REG_MEM_WRITE:
				/* 32-bit write; no value is copied back. */
				ddi_put32(mpt->m_datap,
				    (uint32_t *)(void *)mpt->m_reg +
				    driverdata.RegOffset,
				    driverdata.RegData);
				break;

			default:
				status = EINVAL;
				break;
		}
	} else {
		status = EFAULT;
	}

	mutex_exit(&mpt->m_mutex);
	return (status);
}
11952 11352
11953 11353 static int
11954 11354 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
11955 11355 int *rval)
11956 11356 {
11957 11357 int status = 0;
↓ open down ↓ |
1102 lines elided |
↑ open up ↑ |
11958 11358 mptsas_t *mpt;
11959 11359 mptsas_update_flash_t flashdata;
11960 11360 mptsas_pass_thru_t passthru_data;
11961 11361 mptsas_adapter_data_t adapter_data;
11962 11362 mptsas_pci_info_t pci_info;
11963 11363 int copylen;
11964 11364
11965 11365 int iport_flag = 0;
11966 11366 dev_info_t *dip = NULL;
11967 11367 mptsas_phymask_t phymask = 0;
11968 - struct devctl_iocdata *dcp = NULL;
11969 - uint32_t slotstatus = 0;
11970 - char *addr = NULL;
11971 - mptsas_target_t *ptgt = NULL;
11972 11368
11973 11369 *rval = MPTIOCTL_STATUS_GOOD;
11974 11370 if (secpolicy_sys_config(credp, B_FALSE) != 0) {
11975 11371 return (EPERM);
11976 11372 }
11977 11373
11978 11374 mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
11979 11375 if (mpt == NULL) {
11980 11376 /*
11981 11377 * Called from iport node, get the states
11982 11378 */
11983 11379 iport_flag = 1;
11984 11380 dip = mptsas_get_dip_from_dev(dev, &phymask);
11985 11381 if (dip == NULL) {
11986 11382 return (ENXIO);
11987 11383 }
11988 11384 mpt = DIP2MPT(dip);
11989 11385 }
11990 11386 /* Make sure power level is D0 before accessing registers */
11991 11387 mutex_enter(&mpt->m_mutex);
11992 11388 if (mpt->m_options & MPTSAS_OPT_PM) {
11993 11389 (void) pm_busy_component(mpt->m_dip, 0);
11994 11390 if (mpt->m_power_level != PM_LEVEL_D0) {
11995 11391 mutex_exit(&mpt->m_mutex);
11996 11392 if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
11997 11393 DDI_SUCCESS) {
11998 11394 mptsas_log(mpt, CE_WARN,
11999 11395 "mptsas%d: mptsas_ioctl: Raise power "
12000 11396 "request failed.", mpt->m_instance);
12001 11397 (void) pm_idle_component(mpt->m_dip, 0);
12002 11398 return (ENXIO);
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
12003 11399 }
12004 11400 } else {
12005 11401 mutex_exit(&mpt->m_mutex);
12006 11402 }
12007 11403 } else {
12008 11404 mutex_exit(&mpt->m_mutex);
12009 11405 }
12010 11406
12011 11407 if (iport_flag) {
12012 11408 status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
12013 - if (status != 0) {
12014 - goto out;
12015 - }
12016 - /*
12017 - * The following code control the OK2RM LED, it doesn't affect
12018 - * the ioctl return status.
12019 - */
12020 - if ((cmd == DEVCTL_DEVICE_ONLINE) ||
12021 - (cmd == DEVCTL_DEVICE_OFFLINE)) {
12022 - if (ndi_dc_allochdl((void *)data, &dcp) !=
12023 - NDI_SUCCESS) {
12024 - goto out;
12025 - }
12026 - addr = ndi_dc_getaddr(dcp);
12027 - ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
12028 - if (ptgt == NULL) {
12029 - NDBG14(("mptsas_ioctl led control: tgt %s not "
12030 - "found", addr));
12031 - ndi_dc_freehdl(dcp);
12032 - goto out;
12033 - }
12034 - mutex_enter(&mpt->m_mutex);
12035 - if (cmd == DEVCTL_DEVICE_ONLINE) {
12036 - ptgt->m_tgt_unconfigured = 0;
12037 - } else if (cmd == DEVCTL_DEVICE_OFFLINE) {
12038 - ptgt->m_tgt_unconfigured = 1;
12039 - }
12040 - slotstatus = 0;
12041 -#ifdef MPTSAS_GET_LED
12042 - /*
12043 - * The get led status can't get a valid/reasonable
12044 - * state, so ignore the get led status, and write the
12045 - * required value directly
12046 - */
12047 - if (mptsas_get_led_status(mpt, ptgt, &slotstatus) !=
12048 - DDI_SUCCESS) {
12049 - NDBG14(("mptsas_ioctl: get LED for tgt %s "
12050 - "failed %x", addr, slotstatus));
12051 - slotstatus = 0;
12052 - }
12053 - NDBG14(("mptsas_ioctl: LED status %x for %s",
12054 - slotstatus, addr));
12055 -#endif
12056 - if (cmd == DEVCTL_DEVICE_OFFLINE) {
12057 - slotstatus |=
12058 - MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
12059 - } else {
12060 - slotstatus &=
12061 - ~MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
12062 - }
12063 - if (mptsas_set_led_status(mpt, ptgt, slotstatus) !=
12064 - DDI_SUCCESS) {
12065 - NDBG14(("mptsas_ioctl: set LED for tgt %s "
12066 - "failed %x", addr, slotstatus));
12067 - }
12068 - mutex_exit(&mpt->m_mutex);
12069 - ndi_dc_freehdl(dcp);
12070 - }
12071 11409 goto out;
12072 11410 }
12073 11411 switch (cmd) {
12074 11412 case MPTIOCTL_UPDATE_FLASH:
12075 11413 if (ddi_copyin((void *)data, &flashdata,
12076 11414 sizeof (struct mptsas_update_flash), mode)) {
12077 11415 status = EFAULT;
12078 11416 break;
12079 11417 }
12080 11418
12081 11419 mutex_enter(&mpt->m_mutex);
12082 11420 if (mptsas_update_flash(mpt,
12083 11421 (caddr_t)(long)flashdata.PtrBuffer,
12084 11422 flashdata.ImageSize, flashdata.ImageType, mode)) {
12085 11423 status = EFAULT;
12086 11424 }
12087 11425
12088 11426 /*
12089 11427 * Reset the chip to start using the new
12090 11428 * firmware. Reset if failed also.
12091 11429 */
12092 11430 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12093 11431 if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
12094 11432 status = EFAULT;
12095 11433 }
12096 11434 mutex_exit(&mpt->m_mutex);
12097 11435 break;
12098 11436 case MPTIOCTL_PASS_THRU:
12099 11437 /*
12100 11438 * The user has requested to pass through a command to
12101 11439 * be executed by the MPT firmware. Call our routine
12102 11440 * which does this. Only allow one passthru IOCTL at
12103 11441 * one time. Other threads will block on
12104 11442 * m_passthru_mutex, which is of adaptive variant.
12105 11443 */
12106 11444 if (ddi_copyin((void *)data, &passthru_data,
12107 11445 sizeof (mptsas_pass_thru_t), mode)) {
12108 11446 status = EFAULT;
12109 11447 break;
12110 11448 }
12111 11449 mutex_enter(&mpt->m_passthru_mutex);
12112 11450 mutex_enter(&mpt->m_mutex);
12113 11451 status = mptsas_pass_thru(mpt, &passthru_data, mode);
12114 11452 mutex_exit(&mpt->m_mutex);
12115 11453 mutex_exit(&mpt->m_passthru_mutex);
12116 11454
12117 11455 break;
12118 11456 case MPTIOCTL_GET_ADAPTER_DATA:
12119 11457 /*
12120 11458 * The user has requested to read adapter data. Call
12121 11459 * our routine which does this.
12122 11460 */
12123 11461 bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
12124 11462 if (ddi_copyin((void *)data, (void *)&adapter_data,
12125 11463 sizeof (mptsas_adapter_data_t), mode)) {
12126 11464 status = EFAULT;
12127 11465 break;
12128 11466 }
12129 11467 if (adapter_data.StructureLength >=
12130 11468 sizeof (mptsas_adapter_data_t)) {
12131 11469 adapter_data.StructureLength = (uint32_t)
12132 11470 sizeof (mptsas_adapter_data_t);
12133 11471 copylen = sizeof (mptsas_adapter_data_t);
12134 11472 mutex_enter(&mpt->m_mutex);
12135 11473 mptsas_read_adapter_data(mpt, &adapter_data);
12136 11474 mutex_exit(&mpt->m_mutex);
12137 11475 } else {
12138 11476 adapter_data.StructureLength = (uint32_t)
12139 11477 sizeof (mptsas_adapter_data_t);
12140 11478 copylen = sizeof (adapter_data.StructureLength);
12141 11479 *rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
12142 11480 }
12143 11481 if (ddi_copyout((void *)(&adapter_data), (void *)data,
12144 11482 copylen, mode) != 0) {
12145 11483 status = EFAULT;
12146 11484 }
12147 11485 break;
12148 11486 case MPTIOCTL_GET_PCI_INFO:
12149 11487 /*
12150 11488 * The user has requested to read pci info. Call
12151 11489 * our routine which does this.
12152 11490 */
12153 11491 bzero(&pci_info, sizeof (mptsas_pci_info_t));
12154 11492 mutex_enter(&mpt->m_mutex);
12155 11493 mptsas_read_pci_info(mpt, &pci_info);
12156 11494 mutex_exit(&mpt->m_mutex);
12157 11495 if (ddi_copyout((void *)(&pci_info), (void *)data,
12158 11496 sizeof (mptsas_pci_info_t), mode) != 0) {
12159 11497 status = EFAULT;
12160 11498 }
12161 11499 break;
12162 11500 case MPTIOCTL_RESET_ADAPTER:
12163 11501 mutex_enter(&mpt->m_mutex);
12164 11502 mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
12165 11503 if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
12166 11504 mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
12167 11505 "failed");
12168 11506 status = EFAULT;
12169 11507 }
12170 11508 mutex_exit(&mpt->m_mutex);
12171 11509 break;
12172 11510 case MPTIOCTL_DIAG_ACTION:
12173 11511 /*
12174 11512 * The user has done a diag buffer action. Call our
12175 11513 * routine which does this. Only allow one diag action
12176 11514 * at one time.
12177 11515 */
12178 11516 mutex_enter(&mpt->m_mutex);
12179 11517 if (mpt->m_diag_action_in_progress) {
12180 11518 mutex_exit(&mpt->m_mutex);
12181 11519 return (EBUSY);
12182 11520 }
12183 11521 mpt->m_diag_action_in_progress = 1;
12184 11522 status = mptsas_diag_action(mpt,
12185 11523 (mptsas_diag_action_t *)data, mode);
12186 11524 mpt->m_diag_action_in_progress = 0;
12187 11525 mutex_exit(&mpt->m_mutex);
12188 11526 break;
12189 11527 case MPTIOCTL_EVENT_QUERY:
12190 11528 /*
12191 11529 * The user has done an event query. Call our routine
12192 11530 * which does this.
12193 11531 */
12194 11532 status = mptsas_event_query(mpt,
12195 11533 (mptsas_event_query_t *)data, mode, rval);
12196 11534 break;
12197 11535 case MPTIOCTL_EVENT_ENABLE:
12198 11536 /*
12199 11537 * The user has done an event enable. Call our routine
12200 11538 * which does this.
12201 11539 */
12202 11540 status = mptsas_event_enable(mpt,
12203 11541 (mptsas_event_enable_t *)data, mode, rval);
12204 11542 break;
12205 11543 case MPTIOCTL_EVENT_REPORT:
12206 11544 /*
12207 11545 * The user has done an event report. Call our routine
12208 11546 * which does this.
12209 11547 */
12210 11548 status = mptsas_event_report(mpt,
12211 11549 (mptsas_event_report_t *)data, mode, rval);
12212 11550 break;
12213 11551 case MPTIOCTL_REG_ACCESS:
12214 11552 /*
12215 11553 * The user has requested register access. Call our
12216 11554 * routine which does this.
12217 11555 */
↓ open down ↓ |
137 lines elided |
↑ open up ↑ |
12218 11556 status = mptsas_reg_access(mpt,
12219 11557 (mptsas_reg_access_t *)data, mode);
12220 11558 break;
12221 11559 default:
12222 11560 status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
12223 11561 rval);
12224 11562 break;
12225 11563 }
12226 11564
12227 11565 out:
12228 - if (mpt->m_options & MPTSAS_OPT_PM)
12229 - (void) pm_idle_component(mpt->m_dip, 0);
12230 11566 return (status);
12231 11567 }
12232 11568
/*
 * mptsas_restart_ioc - quiesce, reset, and restart the IOC.
 *
 * Must be called with m_mutex held.  The sequence is strictly ordered:
 * mark in-reset, hold all target throttles, disable interrupts, flush
 * every outstanding/queued command, re-init the chip, then undo each step.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE (chip re-init failed; an FM ereport
 * is posted in that case, but the throttles/queues are still restarted).
 */
int
mptsas_restart_ioc(mptsas_t *mpt)
{
	int rval = DDI_SUCCESS;
	mptsas_target_t *ptgt = NULL;

	ASSERT(mutex_owned(&mpt->m_mutex));

	/*
	 * Set a flag telling I/O path that we're processing a reset.  This is
	 * needed because after the reset is complete, the hash table still
	 * needs to be rebuilt.  If I/Os are started before the hash table is
	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
	 * so that they can be retried.
	 */
	mpt->m_in_reset = TRUE;

	/*
	 * Set all throttles to HOLD so no new commands are issued to any
	 * target while the reset is in progress.
	 */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/*
	 * Disable interrupts
	 */
	MPTSAS_DISABLE_INTR(mpt);

	/*
	 * Abort all commands: outstanding commands, commands in waitq and
	 * tx_waitq.
	 */
	mptsas_flush_hba(mpt);

	/*
	 * Reinitialize the chip.
	 */
	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
		rval = DDI_FAILURE;
	}

	/*
	 * Enable interrupts again
	 */
	MPTSAS_ENABLE_INTR(mpt);

	/*
	 * If mptsas_init_chip was successful, update the driver data.
	 */
	if (rval == DDI_SUCCESS) {
		mptsas_update_driver_data(mpt);
	}

	/*
	 * Reset the throttles back to MAX so targets accept I/O again.
	 */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}

	/* Drain the done queue and kick the HBA to resume queued work. */
	mptsas_doneq_empty(mpt);
	mptsas_restart_hba(mpt);

	if (rval != DDI_SUCCESS) {
		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clear the reset flag so that I/Os can continue.
	 */
	mpt->m_in_reset = FALSE;

	return (rval);
}
12322 11655
/*
 * mptsas_init_chip - bring the IOC to operational state.
 *
 * Called both at attach time (first_time == TRUE) and after a reset.
 * Performs (in order): flash-signature sanity check, IOC reset, config
 * space setup, IOCFacts refresh, (re)allocation of all facts-sized DMA
 * buffers, IOCInit, reply free/post queue initialization, port enable,
 * event notification enable, and a final FMA check of every DMA/access
 * handle.  On the message-unit-reset fast path (MPTSAS_SUCCESS_MUR and
 * not first_time) the reallocation steps are skipped.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mptsas_init_chip(mptsas_t *mpt, int first_time)
{
	ddi_dma_cookie_t cookie;
	uint32_t i;
	int rval;

	/*
	 * Check to see if the firmware image is valid
	 */
	if (ddi_get32(mpt->m_datap, &mpt->m_reg->HostDiagnostic) &
	    MPI2_DIAG_FLASH_BAD_SIG) {
		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
		goto fail;
	}

	/*
	 * Reset the chip
	 */
	rval = mptsas_ioc_reset(mpt, first_time);
	if (rval == MPTSAS_RESET_FAIL) {
		mptsas_log(mpt, CE_WARN, "hard reset failed!");
		goto fail;
	}

	/*
	 * A message-unit reset preserves the existing buffers, so skip
	 * straight to re-initializing the IOC.
	 */
	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
		goto mur;
	}
	/*
	 * Setup configuration space
	 */
	if (mptsas_config_space_init(mpt) == FALSE) {
		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
		    "failed!");
		goto fail;
	}

	/*
	 * IOC facts can change after a diag reset so all buffers that are
	 * based on these numbers must be de-allocated and re-allocated.  Get
	 * new IOC facts each time chip is initialized.
	 */
	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
		goto fail;
	}

	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
		goto fail;
	}
	/*
	 * Allocate request message frames, reply free queue, reply descriptor
	 * post queue, and reply message frames using latest IOC facts.
	 */
	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
		goto fail;
	}
	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
		goto fail;
	}
	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
		goto fail;
	}

mur:
	/*
	 * Re-Initialize ioc to operational state
	 */
	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
		goto fail;
	}

	mptsas_alloc_reply_args(mpt);

	/*
	 * Initialize reply post index.  Reply free index is initialized after
	 * the next loop.
	 */
	mpt->m_post_index = 0;

	/*
	 * Initialize the Reply Free Queue with the physical addresses of our
	 * reply frames.
	 */
	cookie.dmac_address = mpt->m_reply_frame_dma_addr;
	for (i = 0; i < mpt->m_max_replies; i++) {
		ddi_put32(mpt->m_acc_free_queue_hdl,
		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
		    cookie.dmac_address);
		cookie.dmac_address += mpt->m_reply_frame_size;
	}
	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Initialize the reply free index to one past the last frame on the
	 * queue.  This will signify that the queue is empty to start with.
	 */
	mpt->m_free_index = i;
	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);

	/*
	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
	 * (All-ones marks a descriptor slot as unused.)
	 */
	for (i = 0; i < mpt->m_post_queue_depth; i++) {
		ddi_put64(mpt->m_acc_post_queue_hdl,
		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
		    0xFFFFFFFFFFFFFFFF);
	}
	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Enable ports
	 */
	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
		goto fail;
	}

	/*
	 * enable events
	 */
	if (mptsas_ioc_enable_event_notification(mpt)) {
		goto fail;
	}

	/*
	 * We need checks in attach and these.
	 * chip_init is called in mult. places
	 */

	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	/* Check all acc handles */
	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
	    DDI_SUCCESS) ||
	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
	    DDI_SUCCESS)) {
		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	return (DDI_FAILURE);
}
12500 11833
12501 11834 static int
12502 11835 mptsas_get_pci_cap(mptsas_t *mpt)
12503 11836 {
12504 11837 ushort_t caps_ptr, cap, cap_count;
12505 11838
12506 11839 if (mpt->m_config_handle == NULL)
12507 11840 return (FALSE);
12508 11841 /*
12509 11842 * Check if capabilities list is supported and if so,
12510 11843 * get initial capabilities pointer and clear bits 0,1.
12511 11844 */
12512 11845 if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
12513 11846 & PCI_STAT_CAP) {
12514 11847 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12515 11848 PCI_CONF_CAP_PTR), 4);
12516 11849 } else {
12517 11850 caps_ptr = PCI_CAP_NEXT_PTR_NULL;
12518 11851 }
12519 11852
12520 11853 /*
12521 11854 * Walk capabilities if supported.
12522 11855 */
12523 11856 for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
12524 11857
12525 11858 /*
12526 11859 * Check that we haven't exceeded the maximum number of
12527 11860 * capabilities and that the pointer is in a valid range.
12528 11861 */
12529 11862 if (++cap_count > 48) {
12530 11863 mptsas_log(mpt, CE_WARN,
12531 11864 "too many device capabilities.\n");
12532 11865 break;
12533 11866 }
12534 11867 if (caps_ptr < 64) {
12535 11868 mptsas_log(mpt, CE_WARN,
12536 11869 "capabilities pointer 0x%x out of range.\n",
12537 11870 caps_ptr);
12538 11871 break;
12539 11872 }
12540 11873
12541 11874 /*
12542 11875 * Get next capability and check that it is valid.
12543 11876 * For now, we only support power management.
12544 11877 */
12545 11878 cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
12546 11879 switch (cap) {
12547 11880 case PCI_CAP_ID_PM:
12548 11881 mptsas_log(mpt, CE_NOTE,
12549 11882 "?mptsas%d supports power management.\n",
12550 11883 mpt->m_instance);
12551 11884 mpt->m_options |= MPTSAS_OPT_PM;
12552 11885
12553 11886 /* Save PMCSR offset */
12554 11887 mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
12555 11888 break;
12556 11889 /*
12557 11890 * The following capabilities are valid. Any others
12558 11891 * will cause a message to be logged.
12559 11892 */
12560 11893 case PCI_CAP_ID_VPD:
12561 11894 case PCI_CAP_ID_MSI:
12562 11895 case PCI_CAP_ID_PCIX:
12563 11896 case PCI_CAP_ID_PCI_E:
12564 11897 case PCI_CAP_ID_MSI_X:
12565 11898 break;
12566 11899 default:
12567 11900 mptsas_log(mpt, CE_NOTE,
12568 11901 "?mptsas%d unrecognized capability "
12569 11902 "0x%x.\n", mpt->m_instance, cap);
12570 11903 break;
12571 11904 }
12572 11905
12573 11906 /*
12574 11907 * Get next capabilities pointer and clear bits 0,1.
12575 11908 */
12576 11909 caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
12577 11910 (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
12578 11911 }
12579 11912 return (TRUE);
12580 11913 }
12581 11914
12582 11915 static int
12583 11916 mptsas_init_pm(mptsas_t *mpt)
12584 11917 {
12585 11918 char pmc_name[16];
12586 11919 char *pmc[] = {
12587 11920 NULL,
12588 11921 "0=Off (PCI D3 State)",
12589 11922 "3=On (PCI D0 State)",
12590 11923 NULL
12591 11924 };
12592 11925 uint16_t pmcsr_stat;
12593 11926
12594 11927 if (mptsas_get_pci_cap(mpt) == FALSE) {
12595 11928 return (DDI_FAILURE);
12596 11929 }
12597 11930 /*
12598 11931 * If PCI's capability does not support PM, then don't need
12599 11932 * to registe the pm-components
12600 11933 */
↓ open down ↓ |
289 lines elided |
↑ open up ↑ |
12601 11934 if (!(mpt->m_options & MPTSAS_OPT_PM))
12602 11935 return (DDI_SUCCESS);
12603 11936 /*
12604 11937 * If power management is supported by this chip, create
12605 11938 * pm-components property for the power management framework
12606 11939 */
12607 11940 (void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
12608 11941 pmc[0] = pmc_name;
12609 11942 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
12610 11943 "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
12611 - mutex_enter(&mpt->m_intr_mutex);
12612 11944 mpt->m_options &= ~MPTSAS_OPT_PM;
12613 - mutex_exit(&mpt->m_intr_mutex);
12614 11945 mptsas_log(mpt, CE_WARN,
12615 11946 "mptsas%d: pm-component property creation failed.",
12616 11947 mpt->m_instance);
12617 11948 return (DDI_FAILURE);
12618 11949 }
12619 11950
12620 11951 /*
12621 11952 * Power on device.
12622 11953 */
12623 11954 (void) pm_busy_component(mpt->m_dip, 0);
12624 11955 pmcsr_stat = pci_config_get16(mpt->m_config_handle,
12625 11956 mpt->m_pmcsr_offset);
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
12626 11957 if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
12627 11958 mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
12628 11959 mpt->m_instance);
12629 11960 pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
12630 11961 PCI_PMCSR_D0);
12631 11962 }
12632 11963 if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
12633 11964 mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
12634 11965 return (DDI_FAILURE);
12635 11966 }
12636 - mutex_enter(&mpt->m_intr_mutex);
12637 11967 mpt->m_power_level = PM_LEVEL_D0;
12638 - mutex_exit(&mpt->m_intr_mutex);
12639 11968 /*
12640 11969 * Set pm idle delay.
12641 11970 */
12642 11971 mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
12643 11972 mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
12644 11973
12645 11974 return (DDI_SUCCESS);
12646 11975 }
12647 11976
12648 11977 static int
12649 11978 mptsas_register_intrs(mptsas_t *mpt)
12650 11979 {
12651 11980 dev_info_t *dip;
12652 11981 int intr_types;
12653 11982
12654 11983 dip = mpt->m_dip;
12655 11984
12656 11985 /* Get supported interrupt types */
12657 11986 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
12658 11987 mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
12659 11988 "failed\n");
12660 11989 return (FALSE);
12661 11990 }
12662 11991
12663 11992 NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
12664 11993
12665 11994 /*
12666 11995 * Try MSI, but fall back to FIXED
12667 11996 */
12668 11997 if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
12669 11998 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
12670 11999 NDBG0(("Using MSI interrupt type"));
12671 12000 mpt->m_intr_type = DDI_INTR_TYPE_MSI;
12672 12001 return (TRUE);
12673 12002 }
12674 12003 }
12675 12004 if (intr_types & DDI_INTR_TYPE_FIXED) {
12676 12005 if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
12677 12006 NDBG0(("Using FIXED interrupt type"));
12678 12007 mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
12679 12008 return (TRUE);
12680 12009 } else {
12681 12010 NDBG0(("FIXED interrupt registration failed"));
12682 12011 return (FALSE);
12683 12012 }
12684 12013 }
12685 12014
12686 12015 return (FALSE);
12687 12016 }
12688 12017
/*
 * mptsas_unregister_intrs - tear down all registered interrupts.
 * Thin wrapper kept for symmetry with mptsas_register_intrs().
 */
static void
mptsas_unregister_intrs(mptsas_t *mpt)
{
	mptsas_rem_intrs(mpt);
}
12694 12023
12695 12024 /*
12696 12025 * mptsas_add_intrs:
12697 12026 *
12698 12027 * Register FIXED or MSI interrupts.
12699 12028 */
12700 12029 static int
12701 12030 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
12702 12031 {
12703 12032 dev_info_t *dip = mpt->m_dip;
12704 12033 int avail, actual, count = 0;
12705 12034 int i, flag, ret;
12706 12035
12707 12036 NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
12708 12037
12709 12038 /* Get number of interrupts */
12710 12039 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
12711 12040 if ((ret != DDI_SUCCESS) || (count <= 0)) {
12712 12041 mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
12713 12042 "ret %d count %d\n", ret, count);
12714 12043
12715 12044 return (DDI_FAILURE);
12716 12045 }
↓ open down ↓ |
68 lines elided |
↑ open up ↑ |
12717 12046
12718 12047 /* Get number of available interrupts */
12719 12048 ret = ddi_intr_get_navail(dip, intr_type, &avail);
12720 12049 if ((ret != DDI_SUCCESS) || (avail == 0)) {
12721 12050 mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
12722 12051 "ret %d avail %d\n", ret, avail);
12723 12052
12724 12053 return (DDI_FAILURE);
12725 12054 }
12726 12055
12727 - if (avail < count) {
12056 + if (0 && avail < count) {
12728 12057 mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nvail returned %d, "
12729 12058 "navail() returned %d", count, avail);
12730 12059 }
12731 12060
12732 12061 /* Mpt only have one interrupt routine */
12733 12062 if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
12734 12063 count = 1;
12735 12064 }
12736 12065
12737 12066 /* Allocate an array of interrupt handles */
12738 12067 mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
12739 12068 mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
12740 12069
12741 12070 flag = DDI_INTR_ALLOC_NORMAL;
12742 12071
12743 12072 /* call ddi_intr_alloc() */
12744 12073 ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
12745 12074 count, &actual, flag);
12746 12075
12747 12076 if ((ret != DDI_SUCCESS) || (actual == 0)) {
12748 12077 mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
12749 12078 ret);
12750 12079 kmem_free(mpt->m_htable, mpt->m_intr_size);
12751 12080 return (DDI_FAILURE);
12752 12081 }
12753 12082
12754 12083 /* use interrupt count returned or abort? */
12755 12084 if (actual < count) {
12756 12085 mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
12757 12086 count, actual);
12758 12087 }
12759 12088
12760 12089 mpt->m_intr_cnt = actual;
12761 12090
12762 12091 /*
12763 12092 * Get priority for first msi, assume remaining are all the same
12764 12093 */
12765 12094 if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
12766 12095 &mpt->m_intr_pri)) != DDI_SUCCESS) {
12767 12096 mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
12768 12097
12769 12098 /* Free already allocated intr */
12770 12099 for (i = 0; i < actual; i++) {
12771 12100 (void) ddi_intr_free(mpt->m_htable[i]);
12772 12101 }
12773 12102
12774 12103 kmem_free(mpt->m_htable, mpt->m_intr_size);
12775 12104 return (DDI_FAILURE);
12776 12105 }
12777 12106
12778 12107 /* Test for high level mutex */
12779 12108 if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
12780 12109 mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
12781 12110 "Hi level interrupt not supported\n");
12782 12111
12783 12112 /* Free already allocated intr */
12784 12113 for (i = 0; i < actual; i++) {
12785 12114 (void) ddi_intr_free(mpt->m_htable[i]);
12786 12115 }
12787 12116
12788 12117 kmem_free(mpt->m_htable, mpt->m_intr_size);
12789 12118 return (DDI_FAILURE);
12790 12119 }
12791 12120
12792 12121 /* Call ddi_intr_add_handler() */
12793 12122 for (i = 0; i < actual; i++) {
12794 12123 if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
12795 12124 (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
12796 12125 mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
12797 12126 "failed %d\n", ret);
12798 12127
12799 12128 /* Free already allocated intr */
12800 12129 for (i = 0; i < actual; i++) {
12801 12130 (void) ddi_intr_free(mpt->m_htable[i]);
12802 12131 }
12803 12132
12804 12133 kmem_free(mpt->m_htable, mpt->m_intr_size);
12805 12134 return (DDI_FAILURE);
12806 12135 }
12807 12136 }
12808 12137
12809 12138 if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
12810 12139 != DDI_SUCCESS) {
12811 12140 mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
12812 12141
12813 12142 /* Free already allocated intr */
12814 12143 for (i = 0; i < actual; i++) {
12815 12144 (void) ddi_intr_free(mpt->m_htable[i]);
12816 12145 }
12817 12146
12818 12147 kmem_free(mpt->m_htable, mpt->m_intr_size);
12819 12148 return (DDI_FAILURE);
12820 12149 }
12821 12150
12822 12151 /*
12823 12152 * Enable interrupts
12824 12153 */
12825 12154 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12826 12155 /* Call ddi_intr_block_enable() for MSI interrupts */
12827 12156 (void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
12828 12157 } else {
12829 12158 /* Call ddi_intr_enable for MSI or FIXED interrupts */
12830 12159 for (i = 0; i < mpt->m_intr_cnt; i++) {
12831 12160 (void) ddi_intr_enable(mpt->m_htable[i]);
12832 12161 }
12833 12162 }
12834 12163 return (DDI_SUCCESS);
12835 12164 }
12836 12165
12837 12166 /*
12838 12167 * mptsas_rem_intrs:
12839 12168 *
12840 12169 * Unregister FIXED or MSI interrupts
12841 12170 */
12842 12171 static void
12843 12172 mptsas_rem_intrs(mptsas_t *mpt)
12844 12173 {
12845 12174 int i;
12846 12175
12847 12176 NDBG6(("mptsas_rem_intrs"));
12848 12177
12849 12178 /* Disable all interrupts */
12850 12179 if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
12851 12180 /* Call ddi_intr_block_disable() */
12852 12181 (void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
12853 12182 } else {
12854 12183 for (i = 0; i < mpt->m_intr_cnt; i++) {
12855 12184 (void) ddi_intr_disable(mpt->m_htable[i]);
12856 12185 }
12857 12186 }
12858 12187
12859 12188 /* Call ddi_intr_remove_handler() */
12860 12189 for (i = 0; i < mpt->m_intr_cnt; i++) {
12861 12190 (void) ddi_intr_remove_handler(mpt->m_htable[i]);
12862 12191 (void) ddi_intr_free(mpt->m_htable[i]);
12863 12192 }
12864 12193
12865 12194 kmem_free(mpt->m_htable, mpt->m_intr_size);
12866 12195 }
12867 12196
12868 12197 /*
12869 12198 * The IO fault service error handling callback function
12870 12199 */
12871 12200 /*ARGSUSED*/
12872 12201 static int
12873 12202 mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
12874 12203 {
12875 12204 /*
12876 12205 * as the driver can always deal with an error in any dma or
12877 12206 * access handle, we can just return the fme_status value.
12878 12207 */
12879 12208 pci_ereport_post(dip, err, NULL);
12880 12209 return (err->fme_status);
12881 12210 }
12882 12211
12883 12212 /*
12884 12213 * mptsas_fm_init - initialize fma capabilities and register with IO
12885 12214 * fault services.
12886 12215 */
12887 12216 static void
12888 12217 mptsas_fm_init(mptsas_t *mpt)
12889 12218 {
12890 12219 /*
12891 12220 * Need to change iblock to priority for new MSI intr
12892 12221 */
12893 12222 ddi_iblock_cookie_t fm_ibc;
12894 12223
12895 12224 /* Only register with IO Fault Services if we have some capability */
12896 12225 if (mpt->m_fm_capabilities) {
12897 12226 /* Adjust access and dma attributes for FMA */
12898 12227 mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
12899 12228 mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12900 12229 mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
12901 12230
12902 12231 /*
12903 12232 * Register capabilities with IO Fault Services.
12904 12233 * mpt->m_fm_capabilities will be updated to indicate
12905 12234 * capabilities actually supported (not requested.)
12906 12235 */
12907 12236 ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
12908 12237
12909 12238 /*
12910 12239 * Initialize pci ereport capabilities if ereport
12911 12240 * capable (should always be.)
12912 12241 */
12913 12242 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12914 12243 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12915 12244 pci_ereport_setup(mpt->m_dip);
12916 12245 }
12917 12246
12918 12247 /*
12919 12248 * Register error callback if error callback capable.
12920 12249 */
12921 12250 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12922 12251 ddi_fm_handler_register(mpt->m_dip,
12923 12252 mptsas_fm_error_cb, (void *) mpt);
12924 12253 }
12925 12254 }
12926 12255 }
12927 12256
12928 12257 /*
12929 12258 * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
12930 12259 * fault services.
12931 12260 *
12932 12261 */
12933 12262 static void
12934 12263 mptsas_fm_fini(mptsas_t *mpt)
12935 12264 {
12936 12265 /* Only unregister FMA capabilities if registered */
12937 12266 if (mpt->m_fm_capabilities) {
12938 12267
12939 12268 /*
12940 12269 * Un-register error callback if error callback capable.
12941 12270 */
12942 12271
12943 12272 if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12944 12273 ddi_fm_handler_unregister(mpt->m_dip);
12945 12274 }
12946 12275
12947 12276 /*
12948 12277 * Release any resources allocated by pci_ereport_setup()
12949 12278 */
12950 12279
12951 12280 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
12952 12281 DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
12953 12282 pci_ereport_teardown(mpt->m_dip);
12954 12283 }
12955 12284
12956 12285 /* Unregister from IO Fault Services */
12957 12286 ddi_fm_fini(mpt->m_dip);
12958 12287
12959 12288 /* Adjust access and dma attributes for FMA */
12960 12289 mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
12961 12290 mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12962 12291 mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12963 12292
12964 12293 }
12965 12294 }
12966 12295
12967 12296 int
12968 12297 mptsas_check_acc_handle(ddi_acc_handle_t handle)
12969 12298 {
12970 12299 ddi_fm_error_t de;
12971 12300
12972 12301 if (handle == NULL)
12973 12302 return (DDI_FAILURE);
12974 12303 ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
12975 12304 return (de.fme_status);
12976 12305 }
12977 12306
12978 12307 int
12979 12308 mptsas_check_dma_handle(ddi_dma_handle_t handle)
12980 12309 {
12981 12310 ddi_fm_error_t de;
12982 12311
12983 12312 if (handle == NULL)
12984 12313 return (DDI_FAILURE);
12985 12314 ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
12986 12315 return (de.fme_status);
12987 12316 }
12988 12317
12989 12318 void
12990 12319 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
12991 12320 {
12992 12321 uint64_t ena;
12993 12322 char buf[FM_MAX_CLASS];
12994 12323
12995 12324 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12996 12325 ena = fm_ena_generate(0, FM_ENA_FMT1);
12997 12326 if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
12998 12327 ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
12999 12328 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
13000 12329 }
13001 12330 }
13002 12331
13003 12332 static int
13004 12333 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13005 12334 uint16_t *dev_handle, mptsas_target_t **pptgt)
13006 12335 {
13007 12336 int rval;
13008 12337 uint32_t dev_info;
13009 12338 uint64_t sas_wwn;
13010 12339 mptsas_phymask_t phymask;
13011 12340 uint8_t physport, phynum, config, disk;
13012 12341 mptsas_slots_t *slots = mpt->m_active;
13013 12342 uint64_t devicename;
13014 12343 uint16_t pdev_hdl;
13015 12344 mptsas_target_t *tmp_tgt = NULL;
13016 12345 uint16_t bay_num, enclosure;
13017 12346
13018 12347 ASSERT(*pptgt == NULL);
13019 12348
13020 12349 rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13021 12350 &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13022 12351 &bay_num, &enclosure);
13023 12352 if (rval != DDI_SUCCESS) {
13024 12353 rval = DEV_INFO_FAIL_PAGE0;
13025 12354 return (rval);
13026 12355 }
13027 12356
13028 12357 if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
13029 12358 MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13030 12359 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == NULL) {
13031 12360 rval = DEV_INFO_WRONG_DEVICE_TYPE;
13032 12361 return (rval);
13033 12362 }
13034 12363
13035 12364 /*
13036 12365 * Check if the dev handle is for a Phys Disk. If so, set return value
13037 12366 * and exit. Don't add Phys Disks to hash.
13038 12367 */
13039 12368 for (config = 0; config < slots->m_num_raid_configs; config++) {
13040 12369 for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
13041 12370 if (*dev_handle == slots->m_raidconfig[config].
13042 12371 m_physdisk_devhdl[disk]) {
13043 12372 rval = DEV_INFO_PHYS_DISK;
13044 12373 return (rval);
13045 12374 }
13046 12375 }
13047 12376 }
13048 12377
13049 12378 /*
13050 12379 * Get SATA Device Name from SAS device page0 for
13051 12380 * sata device, if device name doesn't exist, set m_sas_wwn to
13052 12381 * 0 for direct attached SATA. For the device behind the expander
13053 12382 * we still can use STP address assigned by expander.
13054 12383 */
13055 12384 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13056 12385 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13057 12386 mutex_exit(&mpt->m_mutex);
13058 12387 /* alloc a tmp_tgt to send the cmd */
13059 12388 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target),
13060 12389 KM_SLEEP);
13061 12390 tmp_tgt->m_devhdl = *dev_handle;
13062 12391 tmp_tgt->m_deviceinfo = dev_info;
13063 12392 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
13064 12393 tmp_tgt->m_qfull_retry_interval =
13065 12394 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
13066 12395 tmp_tgt->m_t_throttle = MAX_THROTTLE;
13067 12396 devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13068 12397 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
↓ open down ↓ |
331 lines elided |
↑ open up ↑ |
13069 12398 mutex_enter(&mpt->m_mutex);
13070 12399 if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13071 12400 sas_wwn = devicename;
13072 12401 } else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13073 12402 sas_wwn = 0;
13074 12403 }
13075 12404 }
13076 12405
13077 12406 phymask = mptsas_physport_to_phymask(mpt, physport);
13078 12407 *pptgt = mptsas_tgt_alloc(&slots->m_tgttbl, *dev_handle, sas_wwn,
13079 - dev_info, phymask, phynum, mpt);
12408 + dev_info, phymask, phynum);
13080 12409 if (*pptgt == NULL) {
13081 12410 mptsas_log(mpt, CE_WARN, "Failed to allocated target"
13082 12411 "structure!");
13083 12412 rval = DEV_INFO_FAIL_ALLOC;
13084 12413 return (rval);
13085 12414 }
13086 12415 (*pptgt)->m_enclosure = enclosure;
13087 12416 (*pptgt)->m_slot_num = bay_num;
13088 12417 return (DEV_INFO_SUCCESS);
13089 12418 }
13090 12419
13091 12420 uint64_t
13092 12421 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
13093 12422 {
13094 12423 uint64_t sata_guid = 0, *pwwn = NULL;
13095 12424 int target = ptgt->m_devhdl;
13096 12425 uchar_t *inq83 = NULL;
13097 12426 int inq83_len = 0xFF;
13098 12427 uchar_t *dblk = NULL;
13099 12428 int inq83_retry = 3;
13100 12429 int rval = DDI_FAILURE;
13101 12430
13102 12431 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
13103 12432
13104 12433 inq83_retry:
13105 12434 rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
13106 12435 inq83_len, NULL, 1);
13107 12436 if (rval != DDI_SUCCESS) {
13108 12437 mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
13109 12438 "0x83 for target:%x, lun:%x failed!", target, lun);
13110 12439 goto out;
13111 12440 }
13112 12441 /* According to SAT2, the first descriptor is logic unit name */
13113 12442 dblk = &inq83[4];
13114 12443 if ((dblk[1] & 0x30) != 0) {
13115 12444 mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
13116 12445 goto out;
13117 12446 }
13118 12447 pwwn = (uint64_t *)(void *)(&dblk[4]);
13119 12448 if ((dblk[4] & 0xf0) == 0x50) {
13120 12449 sata_guid = BE_64(*pwwn);
13121 12450 goto out;
13122 12451 } else if (dblk[4] == 'A') {
13123 12452 NDBG20(("SATA drive has no NAA format GUID."));
13124 12453 goto out;
13125 12454 } else {
13126 12455 /* The data is not ready, wait and retry */
13127 12456 inq83_retry--;
13128 12457 if (inq83_retry <= 0) {
13129 12458 goto out;
13130 12459 }
13131 12460 NDBG20(("The GUID is not ready, retry..."));
13132 12461 delay(1 * drv_usectohz(1000000));
13133 12462 goto inq83_retry;
13134 12463 }
13135 12464 out:
13136 12465 kmem_free(inq83, inq83_len);
13137 12466 return (sata_guid);
13138 12467 }
13139 12468
13140 12469 static int
13141 12470 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
13142 12471 unsigned char *buf, int len, int *reallen, uchar_t evpd)
13143 12472 {
13144 12473 uchar_t cdb[CDB_GROUP0];
13145 12474 struct scsi_address ap;
13146 12475 struct buf *data_bp = NULL;
13147 12476 int resid = 0;
13148 12477 int ret = DDI_FAILURE;
13149 12478
13150 12479 ASSERT(len <= 0xffff);
13151 12480
13152 12481 ap.a_target = MPTSAS_INVALID_DEVHDL;
13153 12482 ap.a_lun = (uchar_t)(lun);
13154 12483 ap.a_hba_tran = mpt->m_tran;
13155 12484
13156 12485 data_bp = scsi_alloc_consistent_buf(&ap,
13157 12486 (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
13158 12487 if (data_bp == NULL) {
13159 12488 return (ret);
13160 12489 }
13161 12490 bzero(cdb, CDB_GROUP0);
13162 12491 cdb[0] = SCMD_INQUIRY;
13163 12492 cdb[1] = evpd;
13164 12493 cdb[2] = page;
13165 12494 cdb[3] = (len & 0xff00) >> 8;
13166 12495 cdb[4] = (len & 0x00ff);
13167 12496 cdb[5] = 0;
13168 12497
13169 12498 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
13170 12499 &resid);
13171 12500 if (ret == DDI_SUCCESS) {
13172 12501 if (reallen) {
13173 12502 *reallen = len - resid;
13174 12503 }
13175 12504 bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
13176 12505 }
13177 12506 if (data_bp) {
13178 12507 scsi_free_consistent_buf(data_bp);
13179 12508 }
13180 12509 return (ret);
13181 12510 }
13182 12511
13183 12512 static int
13184 12513 mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
13185 12514 mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
13186 12515 int *resid)
13187 12516 {
13188 12517 struct scsi_pkt *pktp = NULL;
13189 12518 scsi_hba_tran_t *tran_clone = NULL;
13190 12519 mptsas_tgt_private_t *tgt_private = NULL;
13191 12520 int ret = DDI_FAILURE;
13192 12521
13193 12522 /*
13194 12523 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
13195 12524 * information to scsi_init_pkt, allocate a scsi_hba_tran structure
13196 12525 * to simulate the cmds from sd
13197 12526 */
13198 12527 tran_clone = kmem_alloc(
13199 12528 sizeof (scsi_hba_tran_t), KM_SLEEP);
13200 12529 if (tran_clone == NULL) {
13201 12530 goto out;
13202 12531 }
13203 12532 bcopy((caddr_t)mpt->m_tran,
13204 12533 (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
13205 12534 tgt_private = kmem_alloc(
13206 12535 sizeof (mptsas_tgt_private_t), KM_SLEEP);
13207 12536 if (tgt_private == NULL) {
13208 12537 goto out;
13209 12538 }
13210 12539 tgt_private->t_lun = ap->a_lun;
13211 12540 tgt_private->t_private = ptgt;
13212 12541 tran_clone->tran_tgt_private = tgt_private;
13213 12542 ap->a_hba_tran = tran_clone;
13214 12543
13215 12544 pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
13216 12545 data_bp, cdblen, sizeof (struct scsi_arq_status),
13217 12546 0, PKT_CONSISTENT, NULL, NULL);
13218 12547 if (pktp == NULL) {
13219 12548 goto out;
13220 12549 }
13221 12550 bcopy(cdb, pktp->pkt_cdbp, cdblen);
13222 12551 pktp->pkt_flags = FLAG_NOPARITY;
13223 12552 if (scsi_poll(pktp) < 0) {
13224 12553 goto out;
13225 12554 }
13226 12555 if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
13227 12556 goto out;
13228 12557 }
13229 12558 if (resid != NULL) {
13230 12559 *resid = pktp->pkt_resid;
13231 12560 }
13232 12561
13233 12562 ret = DDI_SUCCESS;
13234 12563 out:
13235 12564 if (pktp) {
13236 12565 scsi_destroy_pkt(pktp);
13237 12566 }
13238 12567 if (tran_clone) {
13239 12568 kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
13240 12569 }
13241 12570 if (tgt_private) {
13242 12571 kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
13243 12572 }
13244 12573 return (ret);
13245 12574 }
13246 12575 static int
13247 12576 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
13248 12577 {
13249 12578 char *cp = NULL;
13250 12579 char *ptr = NULL;
13251 12580 size_t s = 0;
13252 12581 char *wwid_str = NULL;
13253 12582 char *lun_str = NULL;
13254 12583 long lunnum;
13255 12584 long phyid = -1;
13256 12585 int rc = DDI_FAILURE;
13257 12586
13258 12587 ptr = name;
13259 12588 ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
13260 12589 ptr++;
13261 12590 if ((cp = strchr(ptr, ',')) == NULL) {
13262 12591 return (DDI_FAILURE);
13263 12592 }
13264 12593
13265 12594 wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13266 12595 s = (uintptr_t)cp - (uintptr_t)ptr;
13267 12596
13268 12597 bcopy(ptr, wwid_str, s);
13269 12598 wwid_str[s] = '\0';
13270 12599
13271 12600 ptr = ++cp;
13272 12601
13273 12602 if ((cp = strchr(ptr, '\0')) == NULL) {
13274 12603 goto out;
13275 12604 }
13276 12605 lun_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
13277 12606 s = (uintptr_t)cp - (uintptr_t)ptr;
13278 12607
13279 12608 bcopy(ptr, lun_str, s);
13280 12609 lun_str[s] = '\0';
13281 12610
13282 12611 if (name[0] == 'p') {
13283 12612 rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
13284 12613 } else {
13285 12614 rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
13286 12615 }
13287 12616 if (rc != DDI_SUCCESS)
13288 12617 goto out;
13289 12618
13290 12619 if (phyid != -1) {
13291 12620 ASSERT(phyid < MPTSAS_MAX_PHYS);
13292 12621 *phy = (uint8_t)phyid;
13293 12622 }
13294 12623 rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
13295 12624 if (rc != 0)
13296 12625 goto out;
13297 12626
13298 12627 *lun = (int)lunnum;
13299 12628 rc = DDI_SUCCESS;
13300 12629 out:
13301 12630 if (wwid_str)
13302 12631 kmem_free(wwid_str, SCSI_MAXNAMELEN);
13303 12632 if (lun_str)
13304 12633 kmem_free(lun_str, SCSI_MAXNAMELEN);
13305 12634
13306 12635 return (rc);
13307 12636 }
13308 12637
13309 12638 /*
13310 12639 * mptsas_parse_smp_name() is to parse sas wwn string
13311 12640 * which format is "wWWN"
13312 12641 */
13313 12642 static int
13314 12643 mptsas_parse_smp_name(char *name, uint64_t *wwn)
13315 12644 {
13316 12645 char *ptr = name;
13317 12646
13318 12647 if (*ptr != 'w') {
13319 12648 return (DDI_FAILURE);
13320 12649 }
13321 12650
13322 12651 ptr++;
13323 12652 if (scsi_wwnstr_to_wwn(ptr, wwn)) {
13324 12653 return (DDI_FAILURE);
13325 12654 }
13326 12655 return (DDI_SUCCESS);
13327 12656 }
13328 12657
/*
 * Bus-config entry point for the iport: configures a single named child
 * (BUS_CONFIG_ONE, including SMP targets and wWWID/pPHY disk addresses)
 * or all children (BUS_CONFIG_DRIVER/BUS_CONFIG_ALL).  Both the vHCI and
 * this nexus are held across the operation.
 */
static int
mptsas_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int ret = NDI_FAILURE;
	int circ = 0;
	int circ1 = 0;
	mptsas_t *mpt;
	char *ptr = NULL;
	char *devnm = NULL;
	uint64_t wwid = 0;
	uint8_t phy = 0xFF;
	int lun = 0;
	uint_t mflags = flag;
	/* When FALSE the config-one path already did the work below. */
	int bconfig = TRUE;

	if (scsi_hba_iport_unit_address(pdip) == 0) {
		return (DDI_FAILURE);
	}

	mpt = DIP2MPT(pdip);
	if (!mpt) {
		return (DDI_FAILURE);
	}
	/*
	 * Hold the nexus across the bus_config
	 */
	ndi_devi_enter(scsi_vhci_dip, &circ);
	ndi_devi_enter(pdip, &circ1);
	switch (op) {
	case BUS_CONFIG_ONE:
		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			ret = NDI_FAILURE;
			break;
		}
		ptr++;
		if (strncmp((char *)arg, "smp", 3) == 0) {
			/*
			 * This is a SMP target device
			 */
			ret = mptsas_parse_smp_name(ptr, &wwid);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			ret = mptsas_config_smp(pdip, wwid, childp);
		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
			/*
			 * OBP could pass down a non-canonical form
			 * bootpath without LUN part when LUN is 0.
			 * So driver need adjust the string.
			 */
			if (strchr(ptr, ',') == NULL) {
				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
				(void) sprintf(devnm, "%s,0", (char *)arg);
				ptr = strchr(devnm, '@');
				ptr++;
			}

			/*
			 * The device path is wWWID format and the device
			 * is not SMP target device.
			 */
			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
			if (ret != DDI_SUCCESS) {
				ret = NDI_FAILURE;
				break;
			}
			*childp = NULL;
			if (ptr[0] == 'w') {
				ret = mptsas_config_one_addr(pdip, wwid,
				    lun, childp);
			} else if (ptr[0] == 'p') {
				ret = mptsas_config_one_phy(pdip, phy, lun,
				    childp);
			}

			/*
			 * If this is CD/DVD device in OBP path, the
			 * ndi_busop_bus_config can be skipped as config one
			 * operation is done above.
			 */
			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
			    (strncmp((char *)arg, "disk", 4) == 0)) {
				bconfig = FALSE;
				ndi_hold_devi(*childp);
			}
		} else {
			ret = NDI_FAILURE;
			break;
		}

		/*
		 * DDI group instructed us to use this flag.
		 */
		mflags |= NDI_MDI_FALLBACK;
		break;
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		mptsas_config_all(pdip);
		ret = NDI_SUCCESS;
		break;
	}

	if ((ret == NDI_SUCCESS) && bconfig) {
		/* Use the canonicalized name (devnm) if one was built. */
		ret = ndi_busop_bus_config(pdip, mflags, op,
		    (devnm == NULL) ? arg : devnm, childp, 0);
	}

	ndi_devi_exit(pdip, circ1);
	ndi_devi_exit(scsi_vhci_dip, circ);
	if (devnm != NULL)
		kmem_free(devnm, SCSI_MAXNAMELEN);
	return (ret);
}
13446 12775
13447 12776 static int
13448 12777 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
13449 12778 mptsas_target_t *ptgt)
13450 12779 {
13451 12780 int rval = DDI_FAILURE;
13452 12781 struct scsi_inquiry *sd_inq = NULL;
13453 12782 mptsas_t *mpt = DIP2MPT(pdip);
13454 12783
13455 12784 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13456 12785
13457 12786 rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
13458 12787 SUN_INQSIZE, 0, (uchar_t)0);
13459 12788
13460 12789 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13461 12790 rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
13462 12791 } else {
13463 12792 rval = DDI_FAILURE;
13464 12793 }
13465 12794
13466 12795 kmem_free(sd_inq, SUN_INQSIZE);
13467 12796 return (rval);
13468 12797 }
13469 12798
13470 12799 static int
13471 12800 mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
13472 12801 dev_info_t **lundip)
13473 12802 {
13474 12803 int rval;
13475 12804 mptsas_t *mpt = DIP2MPT(pdip);
13476 12805 int phymask;
13477 12806 mptsas_target_t *ptgt = NULL;
13478 12807
13479 12808 /*
13480 12809 * Get the physical port associated to the iport
13481 12810 */
13482 12811 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13483 12812 "phymask", 0);
13484 12813
13485 12814 ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
13486 12815 if (ptgt == NULL) {
13487 12816 /*
13488 12817 * didn't match any device by searching
13489 12818 */
13490 12819 return (DDI_FAILURE);
13491 12820 }
13492 12821 /*
13493 12822 * If the LUN already exists and the status is online,
13494 12823 * we just return the pointer to dev_info_t directly.
13495 12824 * For the mdi_pathinfo node, we'll handle it in
13496 12825 * mptsas_create_virt_lun()
13497 12826 * TODO should be also in mptsas_handle_dr
13498 12827 */
13499 12828
13500 12829 *lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
13501 12830 if (*lundip != NULL) {
13502 12831 /*
13503 12832 * TODO Another senario is, we hotplug the same disk
13504 12833 * on the same slot, the devhdl changed, is this
13505 12834 * possible?
13506 12835 * tgt_private->t_private != ptgt
13507 12836 */
13508 12837 if (sasaddr != ptgt->m_sas_wwn) {
13509 12838 /*
13510 12839 * The device has changed although the devhdl is the
13511 12840 * same (Enclosure mapping mode, change drive on the
13512 12841 * same slot)
13513 12842 */
13514 12843 return (DDI_FAILURE);
13515 12844 }
13516 12845 return (DDI_SUCCESS);
13517 12846 }
13518 12847
13519 12848 if (phymask == 0) {
13520 12849 /*
13521 12850 * Configure IR volume
13522 12851 */
13523 12852 rval = mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
13524 12853 return (rval);
13525 12854 }
13526 12855 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13527 12856
13528 12857 return (rval);
13529 12858 }
13530 12859
13531 12860 static int
13532 12861 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
13533 12862 dev_info_t **lundip)
13534 12863 {
13535 12864 int rval;
13536 12865 mptsas_t *mpt = DIP2MPT(pdip);
13537 12866 int phymask;
13538 12867 mptsas_target_t *ptgt = NULL;
13539 12868
13540 12869 /*
13541 12870 * Get the physical port associated to the iport
13542 12871 */
13543 12872 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
13544 12873 "phymask", 0);
13545 12874
13546 12875 ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
13547 12876 if (ptgt == NULL) {
13548 12877 /*
13549 12878 * didn't match any device by searching
13550 12879 */
13551 12880 return (DDI_FAILURE);
13552 12881 }
13553 12882
13554 12883 /*
13555 12884 * If the LUN already exists and the status is online,
13556 12885 * we just return the pointer to dev_info_t directly.
13557 12886 * For the mdi_pathinfo node, we'll handle it in
13558 12887 * mptsas_create_virt_lun().
13559 12888 */
13560 12889
13561 12890 *lundip = mptsas_find_child_phy(pdip, phy);
13562 12891 if (*lundip != NULL) {
13563 12892 return (DDI_SUCCESS);
13564 12893 }
13565 12894
13566 12895 rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
13567 12896
13568 12897 return (rval);
13569 12898 }
13570 12899
13571 12900 static int
13572 12901 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
13573 12902 uint8_t *lun_addr_type)
13574 12903 {
13575 12904 uint32_t lun_idx = 0;
13576 12905
13577 12906 ASSERT(lun_num != NULL);
13578 12907 ASSERT(lun_addr_type != NULL);
13579 12908
13580 12909 lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13581 12910 /* determine report luns addressing type */
13582 12911 switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
13583 12912 /*
13584 12913 * Vendors in the field have been found to be concatenating
13585 12914 * bus/target/lun to equal the complete lun value instead
13586 12915 * of switching to flat space addressing
13587 12916 */
13588 12917 /* 00b - peripheral device addressing method */
13589 12918 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
13590 12919 /* FALLTHRU */
13591 12920 /* 10b - logical unit addressing method */
13592 12921 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
13593 12922 /* FALLTHRU */
13594 12923 /* 01b - flat space addressing method */
13595 12924 case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
13596 12925 /* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
13597 12926 *lun_addr_type = (buf[lun_idx] &
13598 12927 MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
13599 12928 *lun_num = (buf[lun_idx] & 0x3F) << 8;
13600 12929 *lun_num |= buf[lun_idx + 1];
13601 12930 return (DDI_SUCCESS);
13602 12931 default:
13603 12932 return (DDI_FAILURE);
13604 12933 }
13605 12934 }
13606 12935
13607 12936 static int
13608 12937 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
13609 12938 {
13610 12939 struct buf *repluns_bp = NULL;
13611 12940 struct scsi_address ap;
13612 12941 uchar_t cdb[CDB_GROUP5];
13613 12942 int ret = DDI_FAILURE;
13614 12943 int retry = 0;
13615 12944 int lun_list_len = 0;
13616 12945 uint16_t lun_num = 0;
13617 12946 uint8_t lun_addr_type = 0;
13618 12947 uint32_t lun_cnt = 0;
13619 12948 uint32_t lun_total = 0;
13620 12949 dev_info_t *cdip = NULL;
13621 12950 uint16_t *saved_repluns = NULL;
13622 12951 char *buffer = NULL;
13623 12952 int buf_len = 128;
13624 12953 mptsas_t *mpt = DIP2MPT(pdip);
13625 12954 uint64_t sas_wwn = 0;
13626 12955 uint8_t phy = 0xFF;
13627 12956 uint32_t dev_info = 0;
13628 12957
13629 12958 mutex_enter(&mpt->m_mutex);
13630 12959 sas_wwn = ptgt->m_sas_wwn;
13631 12960 phy = ptgt->m_phynum;
13632 12961 dev_info = ptgt->m_deviceinfo;
13633 12962 mutex_exit(&mpt->m_mutex);
13634 12963
13635 12964 if (sas_wwn == 0) {
13636 12965 /*
13637 12966 * It's a SATA without Device Name
13638 12967 * So don't try multi-LUNs
13639 12968 */
13640 12969 if (mptsas_find_child_phy(pdip, phy)) {
13641 12970 return (DDI_SUCCESS);
13642 12971 } else {
13643 12972 /*
13644 12973 * need configure and create node
13645 12974 */
13646 12975 return (DDI_FAILURE);
13647 12976 }
13648 12977 }
13649 12978
13650 12979 /*
13651 12980 * WWN (SAS address or Device Name exist)
13652 12981 */
13653 12982 if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13654 12983 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13655 12984 /*
13656 12985 * SATA device with Device Name
13657 12986 * So don't try multi-LUNs
13658 12987 */
13659 12988 if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
13660 12989 return (DDI_SUCCESS);
13661 12990 } else {
13662 12991 return (DDI_FAILURE);
13663 12992 }
13664 12993 }
13665 12994
13666 12995 do {
13667 12996 ap.a_target = MPTSAS_INVALID_DEVHDL;
13668 12997 ap.a_lun = 0;
13669 12998 ap.a_hba_tran = mpt->m_tran;
13670 12999 repluns_bp = scsi_alloc_consistent_buf(&ap,
13671 13000 (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
13672 13001 if (repluns_bp == NULL) {
13673 13002 retry++;
13674 13003 continue;
13675 13004 }
13676 13005 bzero(cdb, CDB_GROUP5);
13677 13006 cdb[0] = SCMD_REPORT_LUNS;
13678 13007 cdb[6] = (buf_len & 0xff000000) >> 24;
13679 13008 cdb[7] = (buf_len & 0x00ff0000) >> 16;
13680 13009 cdb[8] = (buf_len & 0x0000ff00) >> 8;
13681 13010 cdb[9] = (buf_len & 0x000000ff);
13682 13011
13683 13012 ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
13684 13013 repluns_bp, NULL);
13685 13014 if (ret != DDI_SUCCESS) {
13686 13015 scsi_free_consistent_buf(repluns_bp);
13687 13016 retry++;
13688 13017 continue;
13689 13018 }
13690 13019 lun_list_len = BE_32(*(int *)((void *)(
13691 13020 repluns_bp->b_un.b_addr)));
13692 13021 if (buf_len >= lun_list_len + 8) {
13693 13022 ret = DDI_SUCCESS;
13694 13023 break;
13695 13024 }
13696 13025 scsi_free_consistent_buf(repluns_bp);
13697 13026 buf_len = lun_list_len + 8;
13698 13027
13699 13028 } while (retry < 3);
13700 13029
13701 13030 if (ret != DDI_SUCCESS)
13702 13031 return (ret);
13703 13032 buffer = (char *)repluns_bp->b_un.b_addr;
13704 13033 /*
13705 13034 * find out the number of luns returned by the SCSI ReportLun call
13706 13035 * and allocate buffer space
13707 13036 */
13708 13037 lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
13709 13038 saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
13710 13039 if (saved_repluns == NULL) {
13711 13040 scsi_free_consistent_buf(repluns_bp);
13712 13041 return (DDI_FAILURE);
13713 13042 }
13714 13043 for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
13715 13044 if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
13716 13045 &lun_num, &lun_addr_type) != DDI_SUCCESS) {
13717 13046 continue;
13718 13047 }
13719 13048 saved_repluns[lun_cnt] = lun_num;
13720 13049 if (cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num))
13721 13050 ret = DDI_SUCCESS;
13722 13051 else
13723 13052 ret = mptsas_probe_lun(pdip, lun_num, &cdip,
13724 13053 ptgt);
13725 13054 if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
13726 13055 (void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
13727 13056 MPTSAS_DEV_GONE);
13728 13057 }
13729 13058 }
13730 13059 mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
13731 13060 kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
13732 13061 scsi_free_consistent_buf(repluns_bp);
13733 13062 return (DDI_SUCCESS);
13734 13063 }
13735 13064
13736 13065 static int
13737 13066 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
13738 13067 {
13739 13068 int rval = DDI_FAILURE;
13740 13069 struct scsi_inquiry *sd_inq = NULL;
13741 13070 mptsas_t *mpt = DIP2MPT(pdip);
13742 13071 mptsas_target_t *ptgt = NULL;
13743 13072
13744 13073 mutex_enter(&mpt->m_mutex);
13745 13074 ptgt = mptsas_search_by_devhdl(&mpt->m_active->m_tgttbl, target);
13746 13075 mutex_exit(&mpt->m_mutex);
13747 13076 if (ptgt == NULL) {
13748 13077 mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
13749 13078 "not found.", target);
13750 13079 return (rval);
13751 13080 }
13752 13081
13753 13082 sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
13754 13083 rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
13755 13084 SUN_INQSIZE, 0, (uchar_t)0);
13756 13085
13757 13086 if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
13758 13087 rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
13759 13088 0);
13760 13089 } else {
13761 13090 rval = DDI_FAILURE;
13762 13091 }
13763 13092
13764 13093 kmem_free(sd_inq, SUN_INQSIZE);
13765 13094 return (rval);
13766 13095 }
13767 13096
13768 13097 /*
13769 13098 * configure all RAID volumes for virtual iport
13770 13099 */
13771 13100 static void
13772 13101 mptsas_config_all_viport(dev_info_t *pdip)
13773 13102 {
13774 13103 mptsas_t *mpt = DIP2MPT(pdip);
13775 13104 int config, vol;
13776 13105 int target;
13777 13106 dev_info_t *lundip = NULL;
13778 13107 mptsas_slots_t *slots = mpt->m_active;
13779 13108
13780 13109 /*
13781 13110 * Get latest RAID info and search for any Volume DevHandles. If any
13782 13111 * are found, configure the volume.
13783 13112 */
13784 13113 mutex_enter(&mpt->m_mutex);
13785 13114 for (config = 0; config < slots->m_num_raid_configs; config++) {
13786 13115 for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
13787 13116 if (slots->m_raidconfig[config].m_raidvol[vol].m_israid
13788 13117 == 1) {
13789 13118 target = slots->m_raidconfig[config].
13790 13119 m_raidvol[vol].m_raidhandle;
13791 13120 mutex_exit(&mpt->m_mutex);
13792 13121 (void) mptsas_config_raid(pdip, target,
13793 13122 &lundip);
13794 13123 mutex_enter(&mpt->m_mutex);
13795 13124 }
13796 13125 }
13797 13126 }
13798 13127 mutex_exit(&mpt->m_mutex);
13799 13128 }
13800 13129
/*
 * Offline any LUN child or mpxio path of target ptgt that is no longer
 * present in the target's REPORT LUNS response.  repluns holds the
 * lun_cnt LUN numbers the target currently reports; every node under
 * pdip whose unit address matches the target's WWN but whose LUN is
 * not in repluns is taken offline and removed.
 */
static void
mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
    int lun_cnt, mptsas_target_t *ptgt)
{
	dev_info_t	*child = NULL, *savechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	uint64_t	sas_wwn, wwid;
	uint8_t		phy;
	int		lun;
	int		i;
	int		find;
	char		*addr;
	char		*nodename;
	mptsas_t	*mpt = DIP2MPT(pdip);

	mutex_enter(&mpt->m_mutex);
	wwid = ptgt->m_sas_wwn;
	mutex_exit(&mpt->m_mutex);

	/* Pass 1: physical children of the iport. */
	child = ddi_get_child(pdip);
	while (child) {
		find = 0;
		/* Advance first: savechild may be offlined below. */
		savechild = child;
		child = ddi_get_next_sibling(child);

		/* SMP nodes are not LUNs; leave them alone. */
		nodename = ddi_node_name(savechild);
		if (strcmp(nodename, "smp") == 0) {
			continue;
		}

		addr = ddi_get_name_addr(savechild);
		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
		    DDI_SUCCESS) {
			continue;
		}

		/* Only children of this target are candidates. */
		if (wwid == sas_wwn) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}
		if (find == 0) {
			/*
			 * The target no longer reports this LUN;
			 * remove the stale node.
			 */
			(void) mptsas_offline_lun(pdip, savechild, NULL,
			    NDI_DEVI_REMOVE);
		}
	}

	/* Pass 2: mpxio client paths under this iport. */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		find = 0;
		savepip = pip;
		addr = MDI_PI(pip)->pi_addr;

		/* Advance before possibly offlining savepip. */
		pip = mdi_get_next_client_path(pdip, pip);

		if (addr == NULL) {
			continue;
		}

		if (mptsas_parse_address(addr, &sas_wwn, &phy,
		    &lun) != DDI_SUCCESS) {
			continue;
		}

		if (sas_wwn == wwid) {
			for (i = 0; i < lun_cnt; i++) {
				if (repluns[i] == lun) {
					find = 1;
					break;
				}
			}
		} else {
			continue;
		}

		if (find == 0) {
			/*
			 * The target no longer reports this LUN;
			 * remove the stale path.
			 */
			(void) mptsas_offline_lun(pdip, NULL, savepip,
			    NDI_DEVI_REMOVE);
		}
	}
}
13897 13226
13898 13227 void
13899 13228 mptsas_update_hashtab(struct mptsas *mpt)
13900 13229 {
13901 13230 uint32_t page_address;
13902 13231 int rval = 0;
13903 13232 uint16_t dev_handle;
13904 13233 mptsas_target_t *ptgt = NULL;
13905 13234 mptsas_smp_t smp_node;
13906 13235
13907 13236 /*
13908 13237 * Get latest RAID info.
13909 13238 */
13910 13239 (void) mptsas_get_raid_info(mpt);
13911 13240
13912 13241 dev_handle = mpt->m_smp_devhdl;
13913 13242 for (; mpt->m_done_traverse_smp == 0; ) {
13914 13243 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
13915 13244 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
13916 13245 if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
13917 13246 != DDI_SUCCESS) {
13918 13247 break;
13919 13248 }
13920 13249 mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
13921 13250 (void) mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
13922 13251 }
13923 13252
13924 13253 /*
13925 13254 * Config target devices
13926 13255 */
13927 13256 dev_handle = mpt->m_dev_handle;
13928 13257
13929 13258 /*
13930 13259 * Do loop to get sas device page 0 by GetNextHandle till the
13931 13260 * the last handle. If the sas device is a SATA/SSP target,
13932 13261 * we try to config it.
13933 13262 */
13934 13263 for (; mpt->m_done_traverse_dev == 0; ) {
13935 13264 ptgt = NULL;
13936 13265 page_address =
13937 13266 (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
13938 13267 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
13939 13268 (uint32_t)dev_handle;
13940 13269 rval = mptsas_get_target_device_info(mpt, page_address,
13941 13270 &dev_handle, &ptgt);
13942 13271 if ((rval == DEV_INFO_FAIL_PAGE0) ||
13943 13272 (rval == DEV_INFO_FAIL_ALLOC)) {
13944 13273 break;
13945 13274 }
13946 13275
13947 13276 mpt->m_dev_handle = dev_handle;
13948 13277 }
13949 13278
13950 13279 }
13951 13280
13952 13281 void
13953 13282 mptsas_invalid_hashtab(mptsas_hash_table_t *hashtab)
13954 13283 {
13955 13284 mptsas_hash_data_t *data;
13956 13285 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
13957 13286 while (data != NULL) {
13958 13287 data->devhdl = MPTSAS_INVALID_DEVHDL;
13959 13288 data->device_info = 0;
13960 13289 /*
13961 13290 * For tgttbl, clear dr_flag.
13962 13291 */
13963 13292 data->dr_flag = MPTSAS_DR_INACTIVE;
13964 13293 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
13965 13294 }
13966 13295 }
13967 13296
13968 13297 void
13969 13298 mptsas_update_driver_data(struct mptsas *mpt)
13970 13299 {
13971 13300 /*
13972 13301 * TODO after hard reset, update the driver data structures
13973 13302 * 1. update port/phymask mapping table mpt->m_phy_info
13974 13303 * 2. invalid all the entries in hash table
13975 13304 * m_devhdl = 0xffff and m_deviceinfo = 0
13976 13305 * 3. call sas_device_page/expander_page to update hash table
13977 13306 */
13978 13307 mptsas_update_phymask(mpt);
13979 13308 /*
13980 13309 * Invalid the existing entries
13981 13310 */
13982 13311 mptsas_invalid_hashtab(&mpt->m_active->m_tgttbl);
13983 13312 mptsas_invalid_hashtab(&mpt->m_active->m_smptbl);
13984 13313 mpt->m_done_traverse_dev = 0;
13985 13314 mpt->m_done_traverse_smp = 0;
13986 13315 mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
13987 13316 mptsas_update_hashtab(mpt);
13988 13317 }
13989 13318
/*
 * Configure every SMP expander and target that belongs to this iport,
 * matched by phymask.  phymask 0 designates the virtual iport used
 * for RAID volumes, which are enumerated separately.
 */
static void
mptsas_config_all(dev_info_t *pdip)
{
	dev_info_t	*smpdip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);
	int		phymask = 0;
	mptsas_phymask_t phy_mask;
	mptsas_target_t	*ptgt = NULL;
	mptsas_smp_t	*psmp;

	/*
	 * Get the phymask associated to the iport
	 */
	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
	    "phymask", 0);

	/*
	 * Enumerate RAID volumes here (phymask == 0).
	 */
	if (phymask == 0) {
		mptsas_config_all_viport(pdip);
		return;
	}

	mutex_enter(&mpt->m_mutex);

	/* Refresh the device/SMP tables if a traversal is still pending. */
	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp) {
		mptsas_update_hashtab(mpt);
	}

	/*
	 * Online every SMP expander whose phymask matches this iport.
	 * NOTE(review): m_mutex is dropped around mptsas_online_smp()
	 * while a hash traversal is in flight — this relies on the
	 * table not being restructured concurrently; confirm before
	 * changing the locking here.
	 */
	psmp = (mptsas_smp_t *)mptsas_hash_traverse(&mpt->m_active->m_smptbl,
	    MPTSAS_HASH_FIRST);
	while (psmp != NULL) {
		phy_mask = psmp->m_phymask;
		if (phy_mask == phymask) {
			smpdip = NULL;
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_online_smp(pdip, psmp, &smpdip);
			mutex_enter(&mpt->m_mutex);
		}
		psmp = (mptsas_smp_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_smptbl, MPTSAS_HASH_NEXT);
	}

	/* Likewise configure every target owned by this iport. */
	ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
	    MPTSAS_HASH_FIRST);
	while (ptgt != NULL) {
		phy_mask = ptgt->m_phymask;
		if (phy_mask == phymask) {
			mutex_exit(&mpt->m_mutex);
			(void) mptsas_config_target(pdip, ptgt);
			mutex_enter(&mpt->m_mutex);
		}

		ptgt = (mptsas_target_t *)mptsas_hash_traverse(
		    &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
	}
	mutex_exit(&mpt->m_mutex);
}
14049 13378
14050 13379 static int
14051 13380 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
14052 13381 {
14053 13382 int rval = DDI_FAILURE;
14054 13383 dev_info_t *tdip;
14055 13384
14056 13385 rval = mptsas_config_luns(pdip, ptgt);
14057 13386 if (rval != DDI_SUCCESS) {
14058 13387 /*
14059 13388 * The return value means the SCMD_REPORT_LUNS
14060 13389 * did not execute successfully. The target maybe
14061 13390 * doesn't support such command.
14062 13391 */
14063 13392 rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
14064 13393 }
14065 13394 return (rval);
14066 13395 }
14067 13396
/*
 * Offline every LUN child and mpxio path whose unit address starts
 * with the given target name ("w<wwn>" or "p<phy>").
 * Return fail if not all the children/paths are freed.
 * If there is any path under the HBA, the return value will always be
 * failure because we don't call mdi_pi_free for the path here.
 */
static int
mptsas_offline_target(dev_info_t *pdip, char *name)
{
	dev_info_t	*child = NULL, *prechild = NULL;
	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
	int		tmp_rval, rval = DDI_SUCCESS;
	char		*addr, *cp;
	size_t		s;
	mptsas_t	*mpt = DIP2MPT(pdip);

	/* Pass 1: physical children of the iport. */
	child = ddi_get_child(pdip);
	while (child) {
		addr = ddi_get_name_addr(child);
		/* Advance first: prechild may be offlined below. */
		prechild = child;
		child = ddi_get_next_sibling(child);

		if (addr == NULL) {
			continue;
		}
		/* Unit address is "<target>,<lun>"; isolate the target. */
		if ((cp = strchr(addr, ',')) == NULL) {
			continue;
		}

		s = (uintptr_t)cp - (uintptr_t)addr;

		if (strncmp(addr, name, s) != 0) {
			continue;
		}

		tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
		    NDI_DEVI_REMOVE);
		if (tmp_rval != DDI_SUCCESS) {
			rval = DDI_FAILURE;
			/*
			 * Mark the node gone so later attaches know the
			 * device behind it has been removed.
			 */
			if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
			    prechild, MPTSAS_DEV_GONE) !=
			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas driver "
				    "unable to create property for "
				    "SAS %s (MPTSAS_DEV_GONE)", addr);
			}
		}
	}

	/* Pass 2: mpxio client paths under this iport. */
	pip = mdi_get_next_client_path(pdip, NULL);
	while (pip) {
		addr = MDI_PI(pip)->pi_addr;
		savepip = pip;
		/* Advance before possibly offlining savepip. */
		pip = mdi_get_next_client_path(pdip, pip);
		if (addr == NULL) {
			continue;
		}

		if ((cp = strchr(addr, ',')) == NULL) {
			continue;
		}

		s = (uintptr_t)cp - (uintptr_t)addr;

		if (strncmp(addr, name, s) != 0) {
			continue;
		}

		(void) mptsas_offline_lun(pdip, NULL, savepip,
		    NDI_DEVI_REMOVE);
		/*
		 * driver will not invoke mdi_pi_free, so path will not
		 * be freed forever, return DDI_FAILURE.
		 */
		rval = DDI_FAILURE;
	}
	return (rval);
}
14145 13474
14146 13475 static int
14147 13476 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
14148 13477 mdi_pathinfo_t *rpip, uint_t flags)
14149 13478 {
14150 13479 int rval = DDI_FAILURE;
14151 13480 char *devname;
14152 13481 dev_info_t *cdip, *parent;
14153 13482
14154 13483 if (rpip != NULL) {
14155 13484 parent = scsi_vhci_dip;
14156 13485 cdip = mdi_pi_get_client(rpip);
14157 13486 } else if (rdip != NULL) {
14158 13487 parent = pdip;
14159 13488 cdip = rdip;
14160 13489 } else {
14161 13490 return (DDI_FAILURE);
14162 13491 }
14163 13492
14164 13493 /*
14165 13494 * Make sure node is attached otherwise
14166 13495 * it won't have related cache nodes to
14167 13496 * clean up. i_ddi_devi_attached is
14168 13497 * similiar to i_ddi_node_state(cdip) >=
14169 13498 * DS_ATTACHED.
14170 13499 */
14171 13500 if (i_ddi_devi_attached(cdip)) {
14172 13501
14173 13502 /* Get full devname */
14174 13503 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14175 13504 (void) ddi_deviname(cdip, devname);
14176 13505 /* Clean cache */
14177 13506 (void) devfs_clean(parent, devname + 1,
14178 13507 DV_CLEAN_FORCE);
14179 13508 kmem_free(devname, MAXNAMELEN + 1);
14180 13509 }
14181 13510 if (rpip != NULL) {
14182 13511 if (MDI_PI_IS_OFFLINE(rpip)) {
14183 13512 rval = DDI_SUCCESS;
14184 13513 } else {
14185 13514 rval = mdi_pi_offline(rpip, 0);
14186 13515 }
14187 13516 } else {
14188 13517 rval = ndi_devi_offline(cdip, flags);
14189 13518 }
14190 13519
14191 13520 return (rval);
14192 13521 }
14193 13522
14194 13523 static dev_info_t *
14195 13524 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
14196 13525 {
14197 13526 dev_info_t *child = NULL;
14198 13527 char *smp_wwn = NULL;
14199 13528
14200 13529 child = ddi_get_child(parent);
14201 13530 while (child) {
14202 13531 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
14203 13532 DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
14204 13533 != DDI_SUCCESS) {
14205 13534 child = ddi_get_next_sibling(child);
14206 13535 continue;
14207 13536 }
14208 13537
14209 13538 if (strcmp(smp_wwn, str_wwn) == 0) {
14210 13539 ddi_prop_free(smp_wwn);
14211 13540 break;
14212 13541 }
14213 13542 child = ddi_get_next_sibling(child);
14214 13543 ddi_prop_free(smp_wwn);
14215 13544 }
14216 13545 return (child);
14217 13546 }
14218 13547
14219 13548 static int
14220 13549 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
14221 13550 {
14222 13551 int rval = DDI_FAILURE;
14223 13552 char *devname;
14224 13553 char wwn_str[MPTSAS_WWN_STRLEN];
14225 13554 dev_info_t *cdip;
14226 13555
14227 13556 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
14228 13557
14229 13558 cdip = mptsas_find_smp_child(pdip, wwn_str);
14230 13559
14231 13560 if (cdip == NULL)
14232 13561 return (DDI_SUCCESS);
14233 13562
14234 13563 /*
14235 13564 * Make sure node is attached otherwise
14236 13565 * it won't have related cache nodes to
14237 13566 * clean up. i_ddi_devi_attached is
14238 13567 * similiar to i_ddi_node_state(cdip) >=
14239 13568 * DS_ATTACHED.
14240 13569 */
14241 13570 if (i_ddi_devi_attached(cdip)) {
14242 13571
14243 13572 /* Get full devname */
14244 13573 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
14245 13574 (void) ddi_deviname(cdip, devname);
14246 13575 /* Clean cache */
14247 13576 (void) devfs_clean(pdip, devname + 1,
14248 13577 DV_CLEAN_FORCE);
14249 13578 kmem_free(devname, MAXNAMELEN + 1);
14250 13579 }
14251 13580
14252 13581 rval = ndi_devi_offline(cdip, flags);
14253 13582
14254 13583 return (rval);
14255 13584 }
14256 13585
14257 13586 static dev_info_t *
14258 13587 mptsas_find_child(dev_info_t *pdip, char *name)
14259 13588 {
14260 13589 dev_info_t *child = NULL;
14261 13590 char *rname = NULL;
14262 13591 int rval = DDI_FAILURE;
14263 13592
14264 13593 rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14265 13594
14266 13595 child = ddi_get_child(pdip);
14267 13596 while (child) {
14268 13597 rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
14269 13598 if (rval != DDI_SUCCESS) {
14270 13599 child = ddi_get_next_sibling(child);
14271 13600 bzero(rname, SCSI_MAXNAMELEN);
14272 13601 continue;
14273 13602 }
14274 13603
14275 13604 if (strcmp(rname, name) == 0) {
14276 13605 break;
14277 13606 }
14278 13607 child = ddi_get_next_sibling(child);
14279 13608 bzero(rname, SCSI_MAXNAMELEN);
14280 13609 }
14281 13610
14282 13611 kmem_free(rname, SCSI_MAXNAMELEN);
14283 13612
14284 13613 return (child);
14285 13614 }
14286 13615
14287 13616
14288 13617 static dev_info_t *
14289 13618 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
14290 13619 {
14291 13620 dev_info_t *child = NULL;
14292 13621 char *name = NULL;
14293 13622 char *addr = NULL;
14294 13623
14295 13624 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14296 13625 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14297 13626 (void) sprintf(name, "%016"PRIx64, sasaddr);
14298 13627 (void) sprintf(addr, "w%s,%x", name, lun);
14299 13628 child = mptsas_find_child(pdip, addr);
14300 13629 kmem_free(name, SCSI_MAXNAMELEN);
14301 13630 kmem_free(addr, SCSI_MAXNAMELEN);
14302 13631 return (child);
14303 13632 }
14304 13633
14305 13634 static dev_info_t *
14306 13635 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
14307 13636 {
14308 13637 dev_info_t *child;
14309 13638 char *addr;
14310 13639
14311 13640 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14312 13641 (void) sprintf(addr, "p%x,0", phy);
14313 13642 child = mptsas_find_child(pdip, addr);
14314 13643 kmem_free(addr, SCSI_MAXNAMELEN);
14315 13644 return (child);
14316 13645 }
14317 13646
14318 13647 static mdi_pathinfo_t *
14319 13648 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
14320 13649 {
14321 13650 mdi_pathinfo_t *path;
14322 13651 char *addr = NULL;
14323 13652
14324 13653 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14325 13654 (void) sprintf(addr, "p%x,0", phy);
14326 13655 path = mdi_pi_find(pdip, NULL, addr);
14327 13656 kmem_free(addr, SCSI_MAXNAMELEN);
14328 13657 return (path);
14329 13658 }
14330 13659
14331 13660 static mdi_pathinfo_t *
14332 13661 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
14333 13662 {
14334 13663 mdi_pathinfo_t *path;
14335 13664 char *name = NULL;
14336 13665 char *addr = NULL;
14337 13666
14338 13667 name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14339 13668 addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14340 13669 (void) sprintf(name, "%016"PRIx64, sasaddr);
14341 13670 (void) sprintf(addr, "w%s,%x", name, lun);
14342 13671 path = mdi_pi_find(parent, NULL, addr);
14343 13672 kmem_free(name, SCSI_MAXNAMELEN);
14344 13673 kmem_free(addr, SCSI_MAXNAMELEN);
14345 13674
14346 13675 return (path);
14347 13676 }
14348 13677
/*
 * Create the device node(s) for one LUN of ptgt.  If a valid devid
 * GUID can be derived from INQUIRY VPD page 0x83 and mpxio is enabled,
 * the LUN is created as a scsi_vhci path (mptsas_create_virt_lun);
 * otherwise it becomes a physical child of the iport
 * (mptsas_create_phys_lun).  Returns DDI_SUCCESS when a node was
 * created.
 */
static int
mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
    dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
{
	int		i = 0;
	uchar_t		*inq83 = NULL;
	int		inq83_len1 = 0xFF;
	int		inq83_len = 0;
	int		rval = DDI_FAILURE;
	ddi_devid_t	devid;
	char		*guid = NULL;
	int		target = ptgt->m_devhdl;
	mdi_pathinfo_t	*pip = NULL;
	mptsas_t	*mpt = DIP2MPT(pdip);

	/*
	 * For DVD/CD ROM and tape devices and optical
	 * devices, we won't try to enumerate them under
	 * scsi_vhci, so no need to try page83
	 */
	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
	    sd_inq->inq_dtype == DTYPE_ESI))
		goto create_lun;

	/*
	 * The LCA returns good SCSI status, but corrupt page 83 data the first
	 * time it is queried. The solution is to keep trying to request page83
	 * and verify the GUID is not (DDI_NOT_WELL_FORMED) in
	 * mptsas_inq83_retry_timeout seconds. If the timeout expires, driver
	 * give up to get VPD page at this stage and fail the enumeration.
	 */

	inq83 = kmem_zalloc(inq83_len1, KM_SLEEP);

	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
		    inq83_len1, &inq83_len, 1);
		if (rval != 0) {
			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
			    "0x83 for target:%x, lun:%x failed!", target, lun);
			/*
			 * Tunable: optionally still bind a physical node
			 * when page 0x83 cannot be read at all.
			 */
			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
				goto create_lun;
			goto out;
		}
		/*
		 * create DEVID from inquiry data
		 */
		if ((rval = ddi_devid_scsi_encode(
		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
			/*
			 * extract GUID from DEVID
			 */
			guid = ddi_devid_to_guid(devid);

			/*
			 * Do not enable MPXIO if the strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN, this constrain would be
			 * handled by framework later.
			 */
			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
				ddi_devid_free_guid(guid);
				guid = NULL;
				if (mpt->m_mpxio_enable == TRUE) {
					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
					    "lun:%x doesn't have a valid GUID, "
					    "multipathing for this drive is "
					    "not enabled", target, lun);
				}
			}

			/*
			 * devid no longer needed
			 */
			ddi_devid_free(devid);
			break;
		} else if (rval == DDI_NOT_WELL_FORMED) {
			/*
			 * return value of ddi_devid_scsi_encode equal to
			 * DDI_NOT_WELL_FORMED means DEVID_RETRY, it worth
			 * to retry inquiry page 0x83 and get GUID.
			 */
			NDBG20(("Not well formed devid, retry..."));
			delay(1 * drv_usectohz(1000000));
			continue;
		} else {
			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
			    "path target:%x, lun:%x", target, lun);
			rval = DDI_FAILURE;
			goto create_lun;
		}
	}

	if (i == mptsas_inq83_retry_timeout) {
		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
		    "for path target:%x, lun:%x", target, lun);
	}

	rval = DDI_FAILURE;

create_lun:
	/* Prefer a scsi_vhci path when we have a GUID and mpxio is on. */
	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
		    ptgt, lun);
	}
	if (rval != DDI_SUCCESS) {
		/* Fall back to a physical child of the iport. */
		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
		    ptgt, lun);

	}
out:
	if (guid != NULL) {
		/*
		 * guid no longer needed
		 */
		ddi_devid_free_guid(guid);
	}
	if (inq83 != NULL)
		kmem_free(inq83, inq83_len1);
	return (rval);
}
14472 13801
14473 13802 static int
14474 13803 mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
14475 13804 dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
14476 13805 {
14477 13806 int target;
14478 13807 char *nodename = NULL;
14479 13808 char **compatible = NULL;
14480 13809 int ncompatible = 0;
14481 13810 int mdi_rtn = MDI_FAILURE;
14482 13811 int rval = DDI_FAILURE;
14483 13812 char *old_guid = NULL;
14484 13813 mptsas_t *mpt = DIP2MPT(pdip);
14485 13814 char *lun_addr = NULL;
14486 13815 char *wwn_str = NULL;
14487 13816 char *attached_wwn_str = NULL;
14488 13817 char *component = NULL;
14489 13818 uint8_t phy = 0xFF;
14490 13819 uint64_t sas_wwn;
14491 13820 int64_t lun64 = 0;
14492 13821 uint32_t devinfo;
14493 13822 uint16_t dev_hdl;
14494 13823 uint16_t pdev_hdl;
14495 13824 uint64_t dev_sas_wwn;
14496 13825 uint64_t pdev_sas_wwn;
14497 13826 uint32_t pdev_info;
14498 13827 uint8_t physport;
14499 13828 uint8_t phy_id;
14500 13829 uint32_t page_address;
14501 13830 uint16_t bay_num, enclosure;
14502 13831 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14503 13832 uint32_t dev_info;
14504 13833
14505 13834 mutex_enter(&mpt->m_mutex);
14506 13835 target = ptgt->m_devhdl;
14507 13836 sas_wwn = ptgt->m_sas_wwn;
14508 13837 devinfo = ptgt->m_deviceinfo;
14509 13838 phy = ptgt->m_phynum;
14510 13839 mutex_exit(&mpt->m_mutex);
14511 13840
14512 13841 if (sas_wwn) {
14513 13842 *pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
14514 13843 } else {
14515 13844 *pip = mptsas_find_path_phy(pdip, phy);
14516 13845 }
14517 13846
14518 13847 if (*pip != NULL) {
14519 13848 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14520 13849 ASSERT(*lun_dip != NULL);
14521 13850 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
14522 13851 (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
↓ open down ↓ |
1433 lines elided |
↑ open up ↑ |
14523 13852 MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
14524 13853 if (strncmp(guid, old_guid, strlen(guid)) == 0) {
14525 13854 /*
14526 13855 * Same path back online again.
14527 13856 */
14528 13857 (void) ddi_prop_free(old_guid);
14529 13858 if ((!MDI_PI_IS_ONLINE(*pip)) &&
14530 13859 (!MDI_PI_IS_STANDBY(*pip)) &&
14531 13860 (ptgt->m_tgt_unconfigured == 0)) {
14532 13861 rval = mdi_pi_online(*pip, 0);
14533 - mutex_enter(&mpt->m_mutex);
14534 - (void) mptsas_set_led_status(mpt, ptgt,
14535 - 0);
14536 - mutex_exit(&mpt->m_mutex);
14537 13862 } else {
14538 13863 rval = DDI_SUCCESS;
14539 13864 }
14540 13865 if (rval != DDI_SUCCESS) {
14541 13866 mptsas_log(mpt, CE_WARN, "path:target: "
14542 13867 "%x, lun:%x online failed!", target,
14543 13868 lun);
14544 13869 *pip = NULL;
14545 13870 *lun_dip = NULL;
14546 13871 }
14547 13872 return (rval);
14548 13873 } else {
14549 13874 /*
14550 13875 * The GUID of the LUN has changed which maybe
14551 13876 * because customer mapped another volume to the
14552 13877 * same LUN.
14553 13878 */
14554 13879 mptsas_log(mpt, CE_WARN, "The GUID of the "
14555 13880 "target:%x, lun:%x was changed, maybe "
14556 13881 "because someone mapped another volume "
14557 13882 "to the same LUN", target, lun);
14558 13883 (void) ddi_prop_free(old_guid);
14559 13884 if (!MDI_PI_IS_OFFLINE(*pip)) {
14560 13885 rval = mdi_pi_offline(*pip, 0);
14561 13886 if (rval != MDI_SUCCESS) {
14562 13887 mptsas_log(mpt, CE_WARN, "path:"
14563 13888 "target:%x, lun:%x offline "
14564 13889 "failed!", target, lun);
14565 13890 *pip = NULL;
14566 13891 *lun_dip = NULL;
14567 13892 return (DDI_FAILURE);
14568 13893 }
14569 13894 }
14570 13895 if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
14571 13896 mptsas_log(mpt, CE_WARN, "path:target:"
14572 13897 "%x, lun:%x free failed!", target,
14573 13898 lun);
14574 13899 *pip = NULL;
14575 13900 *lun_dip = NULL;
14576 13901 return (DDI_FAILURE);
14577 13902 }
14578 13903 }
14579 13904 } else {
14580 13905 mptsas_log(mpt, CE_WARN, "Can't get client-guid "
14581 13906 "property for path:target:%x, lun:%x", target, lun);
14582 13907 *pip = NULL;
14583 13908 *lun_dip = NULL;
14584 13909 return (DDI_FAILURE);
14585 13910 }
14586 13911 }
14587 13912 scsi_hba_nodename_compatible_get(inq, NULL,
14588 13913 inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
14589 13914
14590 13915 /*
14591 13916 * if nodename can't be determined then print a message and skip it
14592 13917 */
14593 13918 if (nodename == NULL) {
14594 13919 mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
14595 13920 "driver for target%d lun %d dtype:0x%02x", target, lun,
14596 13921 inq->inq_dtype);
14597 13922 return (DDI_FAILURE);
14598 13923 }
14599 13924
14600 13925 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14601 13926 /* The property is needed by MPAPI */
14602 13927 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
14603 13928
14604 13929 lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14605 13930 if (guid) {
14606 13931 (void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
14607 13932 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14608 13933 } else {
14609 13934 (void) sprintf(lun_addr, "p%x,%x", phy, lun);
14610 13935 (void) sprintf(wwn_str, "p%x", phy);
14611 13936 }
14612 13937
14613 13938 mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
14614 13939 guid, lun_addr, compatible, ncompatible,
14615 13940 0, pip);
14616 13941 if (mdi_rtn == MDI_SUCCESS) {
14617 13942
14618 13943 if (mdi_prop_update_string(*pip, MDI_GUID,
14619 13944 guid) != DDI_SUCCESS) {
14620 13945 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14621 13946 "create prop for target %d lun %d (MDI_GUID)",
14622 13947 target, lun);
14623 13948 mdi_rtn = MDI_FAILURE;
14624 13949 goto virt_create_done;
14625 13950 }
14626 13951
14627 13952 if (mdi_prop_update_int(*pip, LUN_PROP,
14628 13953 lun) != DDI_SUCCESS) {
14629 13954 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14630 13955 "create prop for target %d lun %d (LUN_PROP)",
14631 13956 target, lun);
14632 13957 mdi_rtn = MDI_FAILURE;
14633 13958 goto virt_create_done;
14634 13959 }
14635 13960 lun64 = (int64_t)lun;
14636 13961 if (mdi_prop_update_int64(*pip, LUN64_PROP,
14637 13962 lun64) != DDI_SUCCESS) {
14638 13963 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14639 13964 "create prop for target %d (LUN64_PROP)",
14640 13965 target);
14641 13966 mdi_rtn = MDI_FAILURE;
14642 13967 goto virt_create_done;
14643 13968 }
14644 13969 if (mdi_prop_update_string_array(*pip, "compatible",
14645 13970 compatible, ncompatible) !=
14646 13971 DDI_PROP_SUCCESS) {
14647 13972 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14648 13973 "create prop for target %d lun %d (COMPATIBLE)",
14649 13974 target, lun);
14650 13975 mdi_rtn = MDI_FAILURE;
14651 13976 goto virt_create_done;
14652 13977 }
14653 13978 if (sas_wwn && (mdi_prop_update_string(*pip,
14654 13979 SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
14655 13980 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14656 13981 "create prop for target %d lun %d "
14657 13982 "(target-port)", target, lun);
14658 13983 mdi_rtn = MDI_FAILURE;
14659 13984 goto virt_create_done;
14660 13985 } else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
14661 13986 "sata-phy", phy) != DDI_PROP_SUCCESS)) {
14662 13987 /*
14663 13988 * Direct attached SATA device without DeviceName
14664 13989 */
14665 13990 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14666 13991 "create prop for SAS target %d lun %d "
14667 13992 "(sata-phy)", target, lun);
14668 13993 mdi_rtn = MDI_FAILURE;
14669 13994 goto virt_create_done;
14670 13995 }
14671 13996 mutex_enter(&mpt->m_mutex);
14672 13997
14673 13998 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14674 13999 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14675 14000 (uint32_t)ptgt->m_devhdl;
14676 14001 rval = mptsas_get_sas_device_page0(mpt, page_address,
14677 14002 &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
14678 14003 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14679 14004 if (rval != DDI_SUCCESS) {
14680 14005 mutex_exit(&mpt->m_mutex);
14681 14006 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
14682 14007 "parent device for handle %d", page_address);
14683 14008 mdi_rtn = MDI_FAILURE;
14684 14009 goto virt_create_done;
14685 14010 }
14686 14011
14687 14012 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14688 14013 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14689 14014 rval = mptsas_get_sas_device_page0(mpt, page_address,
14690 14015 &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
14691 14016 &phy_id, &pdev_hdl, &bay_num, &enclosure);
14692 14017 if (rval != DDI_SUCCESS) {
14693 14018 mutex_exit(&mpt->m_mutex);
14694 14019 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14695 14020 "device info for handle %d", page_address);
14696 14021 mdi_rtn = MDI_FAILURE;
14697 14022 goto virt_create_done;
14698 14023 }
14699 14024
14700 14025 mutex_exit(&mpt->m_mutex);
14701 14026
14702 14027 /*
14703 14028 * If this device direct attached to the controller
14704 14029 * set the attached-port to the base wwid
14705 14030 */
14706 14031 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
14707 14032 != DEVINFO_DIRECT_ATTACHED) {
14708 14033 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14709 14034 pdev_sas_wwn);
14710 14035 } else {
14711 14036 /*
14712 14037 * Update the iport's attached-port to guid
14713 14038 */
14714 14039 if (sas_wwn == 0) {
14715 14040 (void) sprintf(wwn_str, "p%x", phy);
14716 14041 } else {
14717 14042 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14718 14043 }
14719 14044 if (ddi_prop_update_string(DDI_DEV_T_NONE,
14720 14045 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
14721 14046 DDI_PROP_SUCCESS) {
14722 14047 mptsas_log(mpt, CE_WARN,
14723 14048 "mptsas unable to create "
14724 14049 "property for iport target-port"
14725 14050 " %s (sas_wwn)",
14726 14051 wwn_str);
14727 14052 mdi_rtn = MDI_FAILURE;
14728 14053 goto virt_create_done;
14729 14054 }
14730 14055
14731 14056 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
14732 14057 mpt->un.m_base_wwid);
14733 14058 }
14734 14059
14735 14060 if (mdi_prop_update_string(*pip,
14736 14061 SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
14737 14062 DDI_PROP_SUCCESS) {
14738 14063 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14739 14064 "property for iport attached-port %s (sas_wwn)",
14740 14065 attached_wwn_str);
14741 14066 mdi_rtn = MDI_FAILURE;
14742 14067 goto virt_create_done;
14743 14068 }
14744 14069
14745 14070
14746 14071 if (inq->inq_dtype == 0) {
14747 14072 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
14748 14073 /*
14749 14074 * set obp path for pathinfo
14750 14075 */
14751 14076 (void) snprintf(component, MAXPATHLEN,
14752 14077 "disk@%s", lun_addr);
14753 14078
14754 14079 if (mdi_pi_pathname_obp_set(*pip, component) !=
14755 14080 DDI_SUCCESS) {
14756 14081 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
14757 14082 "unable to set obp-path for object %s",
14758 14083 component);
14759 14084 mdi_rtn = MDI_FAILURE;
14760 14085 goto virt_create_done;
14761 14086 }
14762 14087 }
14763 14088
14764 14089 *lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
14765 14090 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14766 14091 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14767 14092 if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
14768 14093 "pm-capable", 1)) !=
14769 14094 DDI_PROP_SUCCESS) {
14770 14095 mptsas_log(mpt, CE_WARN, "mptsas driver"
14771 14096 "failed to create pm-capable "
14772 14097 "property, target %d", target);
14773 14098 mdi_rtn = MDI_FAILURE;
14774 14099 goto virt_create_done;
14775 14100 }
14776 14101 }
14777 14102 /*
14778 14103 * Create the phy-num property
14779 14104 */
↓ open down ↓ |
233 lines elided |
↑ open up ↑ |
14780 14105 if (mdi_prop_update_int(*pip, "phy-num",
14781 14106 ptgt->m_phynum) != DDI_SUCCESS) {
14782 14107 mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
14783 14108 "create phy-num property for target %d lun %d",
14784 14109 target, lun);
14785 14110 mdi_rtn = MDI_FAILURE;
14786 14111 goto virt_create_done;
14787 14112 }
14788 14113 NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
14789 14114 mdi_rtn = mdi_pi_online(*pip, 0);
14790 - if (mdi_rtn == MDI_SUCCESS) {
14791 - mutex_enter(&mpt->m_mutex);
14792 - if (mptsas_set_led_status(mpt, ptgt, 0) !=
14793 - DDI_SUCCESS) {
14794 - NDBG14(("mptsas: clear LED for slot %x "
14795 - "failed", ptgt->m_slot_num));
14796 - }
14797 - mutex_exit(&mpt->m_mutex);
14798 - }
14799 14115 if (mdi_rtn == MDI_NOT_SUPPORTED) {
14800 14116 mdi_rtn = MDI_FAILURE;
14801 14117 }
14802 14118 virt_create_done:
14803 14119 if (*pip && mdi_rtn != MDI_SUCCESS) {
14804 14120 (void) mdi_pi_free(*pip, 0);
14805 14121 *pip = NULL;
14806 14122 *lun_dip = NULL;
14807 14123 }
14808 14124 }
14809 14125
14810 14126 scsi_hba_nodename_compatible_free(nodename, compatible);
14811 14127 if (lun_addr != NULL) {
14812 14128 kmem_free(lun_addr, SCSI_MAXNAMELEN);
14813 14129 }
14814 14130 if (wwn_str != NULL) {
14815 14131 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
14816 14132 }
14817 14133 if (component != NULL) {
14818 14134 kmem_free(component, MAXPATHLEN);
14819 14135 }
14820 14136
14821 14137 return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
14822 14138 }
14823 14139
14824 14140 static int
14825 14141 mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
14826 14142 char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
14827 14143 {
14828 14144 int target;
14829 14145 int rval;
14830 14146 int ndi_rtn = NDI_FAILURE;
14831 14147 uint64_t be_sas_wwn;
14832 14148 char *nodename = NULL;
14833 14149 char **compatible = NULL;
14834 14150 int ncompatible = 0;
14835 14151 int instance = 0;
14836 14152 mptsas_t *mpt = DIP2MPT(pdip);
14837 14153 char *wwn_str = NULL;
14838 14154 char *component = NULL;
14839 14155 char *attached_wwn_str = NULL;
14840 14156 uint8_t phy = 0xFF;
14841 14157 uint64_t sas_wwn;
14842 14158 uint32_t devinfo;
14843 14159 uint16_t dev_hdl;
14844 14160 uint16_t pdev_hdl;
14845 14161 uint64_t pdev_sas_wwn;
14846 14162 uint64_t dev_sas_wwn;
14847 14163 uint32_t pdev_info;
14848 14164 uint8_t physport;
14849 14165 uint8_t phy_id;
14850 14166 uint32_t page_address;
14851 14167 uint16_t bay_num, enclosure;
14852 14168 char pdev_wwn_str[MPTSAS_WWN_STRLEN];
14853 14169 uint32_t dev_info;
14854 14170 int64_t lun64 = 0;
14855 14171
14856 14172 mutex_enter(&mpt->m_mutex);
14857 14173 target = ptgt->m_devhdl;
14858 14174 sas_wwn = ptgt->m_sas_wwn;
14859 14175 devinfo = ptgt->m_deviceinfo;
14860 14176 phy = ptgt->m_phynum;
14861 14177 mutex_exit(&mpt->m_mutex);
14862 14178
14863 14179 /*
14864 14180 * generate compatible property with binding-set "mpt"
14865 14181 */
14866 14182 scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
14867 14183 &nodename, &compatible, &ncompatible);
14868 14184
14869 14185 /*
14870 14186 * if nodename can't be determined then print a message and skip it
14871 14187 */
14872 14188 if (nodename == NULL) {
14873 14189 mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
14874 14190 "for target %d lun %d", target, lun);
14875 14191 return (DDI_FAILURE);
14876 14192 }
14877 14193
14878 14194 ndi_rtn = ndi_devi_alloc(pdip, nodename,
14879 14195 DEVI_SID_NODEID, lun_dip);
14880 14196
14881 14197 /*
14882 14198 * if lun alloc success, set props
14883 14199 */
14884 14200 if (ndi_rtn == NDI_SUCCESS) {
14885 14201
14886 14202 if (ndi_prop_update_int(DDI_DEV_T_NONE,
14887 14203 *lun_dip, LUN_PROP, lun) !=
14888 14204 DDI_PROP_SUCCESS) {
14889 14205 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14890 14206 "property for target %d lun %d (LUN_PROP)",
14891 14207 target, lun);
14892 14208 ndi_rtn = NDI_FAILURE;
14893 14209 goto phys_create_done;
14894 14210 }
14895 14211
14896 14212 lun64 = (int64_t)lun;
14897 14213 if (ndi_prop_update_int64(DDI_DEV_T_NONE,
14898 14214 *lun_dip, LUN64_PROP, lun64) !=
14899 14215 DDI_PROP_SUCCESS) {
14900 14216 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14901 14217 "property for target %d lun64 %d (LUN64_PROP)",
14902 14218 target, lun);
14903 14219 ndi_rtn = NDI_FAILURE;
14904 14220 goto phys_create_done;
14905 14221 }
14906 14222 if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
14907 14223 *lun_dip, "compatible", compatible, ncompatible)
14908 14224 != DDI_PROP_SUCCESS) {
14909 14225 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
14910 14226 "property for target %d lun %d (COMPATIBLE)",
14911 14227 target, lun);
14912 14228 ndi_rtn = NDI_FAILURE;
14913 14229 goto phys_create_done;
14914 14230 }
14915 14231
14916 14232 /*
14917 14233 * We need the SAS WWN for non-multipath devices, so
14918 14234 * we'll use the same property as that multipathing
14919 14235 * devices need to present for MPAPI. If we don't have
14920 14236 * a WWN (e.g. parallel SCSI), don't create the prop.
14921 14237 */
14922 14238 wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
14923 14239 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
14924 14240 if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
14925 14241 *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
14926 14242 != DDI_PROP_SUCCESS) {
14927 14243 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14928 14244 "create property for SAS target %d lun %d "
14929 14245 "(target-port)", target, lun);
14930 14246 ndi_rtn = NDI_FAILURE;
14931 14247 goto phys_create_done;
14932 14248 }
14933 14249
14934 14250 be_sas_wwn = BE_64(sas_wwn);
14935 14251 if (sas_wwn && ndi_prop_update_byte_array(
14936 14252 DDI_DEV_T_NONE, *lun_dip, "port-wwn",
14937 14253 (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
14938 14254 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14939 14255 "create property for SAS target %d lun %d "
14940 14256 "(port-wwn)", target, lun);
14941 14257 ndi_rtn = NDI_FAILURE;
14942 14258 goto phys_create_done;
14943 14259 } else if ((sas_wwn == 0) && (ndi_prop_update_int(
14944 14260 DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
14945 14261 DDI_PROP_SUCCESS)) {
14946 14262 /*
14947 14263 * Direct attached SATA device without DeviceName
14948 14264 */
14949 14265 mptsas_log(mpt, CE_WARN, "mptsas unable to "
14950 14266 "create property for SAS target %d lun %d "
14951 14267 "(sata-phy)", target, lun);
14952 14268 ndi_rtn = NDI_FAILURE;
14953 14269 goto phys_create_done;
14954 14270 }
14955 14271
14956 14272 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
14957 14273 *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
14958 14274 mptsas_log(mpt, CE_WARN, "mptsas unable to"
14959 14275 "create property for SAS target %d lun %d"
14960 14276 " (SAS_PROP)", target, lun);
14961 14277 ndi_rtn = NDI_FAILURE;
14962 14278 goto phys_create_done;
14963 14279 }
14964 14280 if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
14965 14281 *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
14966 14282 mptsas_log(mpt, CE_WARN, "mptsas unable "
14967 14283 "to create guid property for target %d "
14968 14284 "lun %d", target, lun);
14969 14285 ndi_rtn = NDI_FAILURE;
14970 14286 goto phys_create_done;
14971 14287 }
14972 14288
14973 14289 /*
14974 14290 * The following code is to set properties for SM-HBA support,
14975 14291 * it doesn't apply to RAID volumes
14976 14292 */
14977 14293 if (ptgt->m_phymask == 0)
14978 14294 goto phys_raid_lun;
14979 14295
14980 14296 mutex_enter(&mpt->m_mutex);
14981 14297
14982 14298 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14983 14299 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14984 14300 (uint32_t)ptgt->m_devhdl;
14985 14301 rval = mptsas_get_sas_device_page0(mpt, page_address,
14986 14302 &dev_hdl, &dev_sas_wwn, &dev_info,
14987 14303 &physport, &phy_id, &pdev_hdl,
14988 14304 &bay_num, &enclosure);
14989 14305 if (rval != DDI_SUCCESS) {
14990 14306 mutex_exit(&mpt->m_mutex);
14991 14307 mptsas_log(mpt, CE_WARN, "mptsas unable to get"
14992 14308 "parent device for handle %d.", page_address);
14993 14309 ndi_rtn = NDI_FAILURE;
14994 14310 goto phys_create_done;
14995 14311 }
14996 14312
14997 14313 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
14998 14314 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
14999 14315 rval = mptsas_get_sas_device_page0(mpt, page_address,
15000 14316 &dev_hdl, &pdev_sas_wwn, &pdev_info,
15001 14317 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
15002 14318 if (rval != DDI_SUCCESS) {
15003 14319 mutex_exit(&mpt->m_mutex);
15004 14320 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15005 14321 "device for handle %d.", page_address);
15006 14322 ndi_rtn = NDI_FAILURE;
15007 14323 goto phys_create_done;
15008 14324 }
15009 14325
15010 14326 mutex_exit(&mpt->m_mutex);
15011 14327
15012 14328 /*
15013 14329 * If this device direct attached to the controller
15014 14330 * set the attached-port to the base wwid
15015 14331 */
15016 14332 if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15017 14333 != DEVINFO_DIRECT_ATTACHED) {
15018 14334 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15019 14335 pdev_sas_wwn);
15020 14336 } else {
15021 14337 /*
15022 14338 * Update the iport's attached-port to guid
15023 14339 */
15024 14340 if (sas_wwn == 0) {
15025 14341 (void) sprintf(wwn_str, "p%x", phy);
15026 14342 } else {
15027 14343 (void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15028 14344 }
15029 14345 if (ddi_prop_update_string(DDI_DEV_T_NONE,
15030 14346 pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15031 14347 DDI_PROP_SUCCESS) {
15032 14348 mptsas_log(mpt, CE_WARN,
15033 14349 "mptsas unable to create "
15034 14350 "property for iport target-port"
15035 14351 " %s (sas_wwn)",
15036 14352 wwn_str);
15037 14353 ndi_rtn = NDI_FAILURE;
15038 14354 goto phys_create_done;
15039 14355 }
15040 14356
15041 14357 (void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15042 14358 mpt->un.m_base_wwid);
15043 14359 }
15044 14360
15045 14361 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15046 14362 *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15047 14363 DDI_PROP_SUCCESS) {
15048 14364 mptsas_log(mpt, CE_WARN,
15049 14365 "mptsas unable to create "
15050 14366 "property for iport attached-port %s (sas_wwn)",
15051 14367 attached_wwn_str);
15052 14368 ndi_rtn = NDI_FAILURE;
15053 14369 goto phys_create_done;
15054 14370 }
15055 14371
15056 14372 if (IS_SATA_DEVICE(dev_info)) {
15057 14373 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15058 14374 *lun_dip, MPTSAS_VARIANT, "sata") !=
15059 14375 DDI_PROP_SUCCESS) {
15060 14376 mptsas_log(mpt, CE_WARN,
15061 14377 "mptsas unable to create "
15062 14378 "property for device variant ");
15063 14379 ndi_rtn = NDI_FAILURE;
15064 14380 goto phys_create_done;
15065 14381 }
15066 14382 }
15067 14383
15068 14384 if (IS_ATAPI_DEVICE(dev_info)) {
15069 14385 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15070 14386 *lun_dip, MPTSAS_VARIANT, "atapi") !=
15071 14387 DDI_PROP_SUCCESS) {
15072 14388 mptsas_log(mpt, CE_WARN,
15073 14389 "mptsas unable to create "
15074 14390 "property for device variant ");
15075 14391 ndi_rtn = NDI_FAILURE;
15076 14392 goto phys_create_done;
15077 14393 }
15078 14394 }
15079 14395
15080 14396 phys_raid_lun:
15081 14397 /*
15082 14398 * if this is a SAS controller, and the target is a SATA
15083 14399 * drive, set the 'pm-capable' property for sd and if on
15084 14400 * an OPL platform, also check if this is an ATAPI
15085 14401 * device.
15086 14402 */
15087 14403 instance = ddi_get_instance(mpt->m_dip);
15088 14404 if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15089 14405 MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15090 14406 NDBG2(("mptsas%d: creating pm-capable property, "
15091 14407 "target %d", instance, target));
15092 14408
15093 14409 if ((ndi_prop_update_int(DDI_DEV_T_NONE,
15094 14410 *lun_dip, "pm-capable", 1)) !=
15095 14411 DDI_PROP_SUCCESS) {
15096 14412 mptsas_log(mpt, CE_WARN, "mptsas "
15097 14413 "failed to create pm-capable "
15098 14414 "property, target %d", target);
15099 14415 ndi_rtn = NDI_FAILURE;
15100 14416 goto phys_create_done;
15101 14417 }
15102 14418
15103 14419 }
15104 14420
15105 14421 if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
15106 14422 /*
15107 14423 * add 'obp-path' properties for devinfo
15108 14424 */
15109 14425 bzero(wwn_str, sizeof (wwn_str));
15110 14426 (void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15111 14427 component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15112 14428 if (guid) {
15113 14429 (void) snprintf(component, MAXPATHLEN,
15114 14430 "disk@w%s,%x", wwn_str, lun);
15115 14431 } else {
15116 14432 (void) snprintf(component, MAXPATHLEN,
15117 14433 "disk@p%x,%x", phy, lun);
15118 14434 }
15119 14435 if (ddi_pathname_obp_set(*lun_dip, component)
15120 14436 != DDI_SUCCESS) {
15121 14437 mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15122 14438 "unable to set obp-path for SAS "
15123 14439 "object %s", component);
15124 14440 ndi_rtn = NDI_FAILURE;
15125 14441 goto phys_create_done;
15126 14442 }
15127 14443 }
15128 14444 /*
15129 14445 * Create the phy-num property for non-raid disk
15130 14446 */
15131 14447 if (ptgt->m_phymask != 0) {
15132 14448 if (ndi_prop_update_int(DDI_DEV_T_NONE,
15133 14449 *lun_dip, "phy-num", ptgt->m_phynum) !=
15134 14450 DDI_PROP_SUCCESS) {
15135 14451 mptsas_log(mpt, CE_WARN, "mptsas driver "
15136 14452 "failed to create phy-num property for "
15137 14453 "target %d", target);
15138 14454 ndi_rtn = NDI_FAILURE;
15139 14455 goto phys_create_done;
15140 14456 }
15141 14457 }
↓ open down ↓ |
333 lines elided |
↑ open up ↑ |
15142 14458 phys_create_done:
15143 14459 /*
15144 14460 * If props were setup ok, online the lun
15145 14461 */
15146 14462 if (ndi_rtn == NDI_SUCCESS) {
15147 14463 /*
15148 14464 * Try to online the new node
15149 14465 */
15150 14466 ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
15151 14467 }
15152 - if (ndi_rtn == NDI_SUCCESS) {
15153 - mutex_enter(&mpt->m_mutex);
15154 - if (mptsas_set_led_status(mpt, ptgt, 0) !=
15155 - DDI_SUCCESS) {
15156 - NDBG14(("mptsas: clear LED for tgt %x "
15157 - "failed", ptgt->m_slot_num));
15158 - }
15159 - mutex_exit(&mpt->m_mutex);
15160 - }
15161 14468
15162 14469 /*
15163 14470 * If success set rtn flag, else unwire alloc'd lun
15164 14471 */
15165 14472 if (ndi_rtn != NDI_SUCCESS) {
15166 14473 NDBG12(("mptsas driver unable to online "
15167 14474 "target %d lun %d", target, lun));
15168 14475 ndi_prop_remove_all(*lun_dip);
15169 14476 (void) ndi_devi_free(*lun_dip);
15170 14477 *lun_dip = NULL;
15171 14478 }
15172 14479 }
15173 14480
15174 14481 scsi_hba_nodename_compatible_free(nodename, compatible);
15175 14482
15176 14483 if (wwn_str != NULL) {
15177 14484 kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15178 14485 }
15179 14486 if (component != NULL) {
15180 14487 kmem_free(component, MAXPATHLEN);
15181 14488 }
15182 14489
15183 14490
15184 14491 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15185 14492 }
15186 14493
15187 14494 static int
15188 14495 mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
15189 14496 {
15190 14497 mptsas_t *mpt = DIP2MPT(pdip);
15191 14498 struct smp_device smp_sd;
15192 14499
15193 14500 /* XXX An HBA driver should not be allocating an smp_device. */
15194 14501 bzero(&smp_sd, sizeof (struct smp_device));
15195 14502 smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
15196 14503 bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
15197 14504
15198 14505 if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
15199 14506 return (NDI_FAILURE);
15200 14507 return (NDI_SUCCESS);
15201 14508 }
15202 14509
15203 14510 static int
15204 14511 mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
15205 14512 {
15206 14513 mptsas_t *mpt = DIP2MPT(pdip);
15207 14514 mptsas_smp_t *psmp = NULL;
15208 14515 int rval;
15209 14516 int phymask;
15210 14517
15211 14518 /*
15212 14519 * Get the physical port associated to the iport
15213 14520 * PHYMASK TODO
15214 14521 */
15215 14522 phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
15216 14523 "phymask", 0);
15217 14524 /*
15218 14525 * Find the smp node in hash table with specified sas address and
15219 14526 * physical port
15220 14527 */
15221 14528 psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
15222 14529 if (psmp == NULL) {
15223 14530 return (DDI_FAILURE);
15224 14531 }
15225 14532
15226 14533 rval = mptsas_online_smp(pdip, psmp, smp_dip);
15227 14534
15228 14535 return (rval);
15229 14536 }
15230 14537
15231 14538 static int
15232 14539 mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
15233 14540 dev_info_t **smp_dip)
15234 14541 {
15235 14542 char wwn_str[MPTSAS_WWN_STRLEN];
15236 14543 char attached_wwn_str[MPTSAS_WWN_STRLEN];
15237 14544 int ndi_rtn = NDI_FAILURE;
15238 14545 int rval = 0;
15239 14546 mptsas_smp_t dev_info;
15240 14547 uint32_t page_address;
15241 14548 mptsas_t *mpt = DIP2MPT(pdip);
15242 14549 uint16_t dev_hdl;
15243 14550 uint64_t sas_wwn;
15244 14551 uint64_t smp_sas_wwn;
15245 14552 uint8_t physport;
15246 14553 uint8_t phy_id;
15247 14554 uint16_t pdev_hdl;
15248 14555 uint8_t numphys = 0;
15249 14556 uint16_t i = 0;
15250 14557 char phymask[MPTSAS_MAX_PHYS];
15251 14558 char *iport = NULL;
15252 14559 mptsas_phymask_t phy_mask = 0;
15253 14560 uint16_t attached_devhdl;
15254 14561 uint16_t bay_num, enclosure;
15255 14562
15256 14563 (void) sprintf(wwn_str, "%"PRIx64, smp_node->m_sasaddr);
15257 14564
15258 14565 /*
15259 14566 * Probe smp device, prevent the node of removed device from being
15260 14567 * configured succesfully
15261 14568 */
15262 14569 if (mptsas_probe_smp(pdip, smp_node->m_sasaddr) != NDI_SUCCESS) {
15263 14570 return (DDI_FAILURE);
15264 14571 }
15265 14572
15266 14573 if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
15267 14574 return (DDI_SUCCESS);
15268 14575 }
15269 14576
15270 14577 ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);
15271 14578
15272 14579 /*
15273 14580 * if lun alloc success, set props
15274 14581 */
15275 14582 if (ndi_rtn == NDI_SUCCESS) {
15276 14583 /*
15277 14584 * Set the flavor of the child to be SMP flavored
15278 14585 */
15279 14586 ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);
15280 14587
15281 14588 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15282 14589 *smp_dip, SMP_WWN, wwn_str) !=
15283 14590 DDI_PROP_SUCCESS) {
15284 14591 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15285 14592 "property for smp device %s (sas_wwn)",
15286 14593 wwn_str);
15287 14594 ndi_rtn = NDI_FAILURE;
15288 14595 goto smp_create_done;
15289 14596 }
15290 14597 (void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_sasaddr);
15291 14598 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15292 14599 *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
15293 14600 DDI_PROP_SUCCESS) {
15294 14601 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15295 14602 "property for iport target-port %s (sas_wwn)",
15296 14603 wwn_str);
15297 14604 ndi_rtn = NDI_FAILURE;
15298 14605 goto smp_create_done;
15299 14606 }
15300 14607
15301 14608 mutex_enter(&mpt->m_mutex);
15302 14609
15303 14610 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
15304 14611 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
15305 14612 rval = mptsas_get_sas_expander_page0(mpt, page_address,
15306 14613 &dev_info);
15307 14614 if (rval != DDI_SUCCESS) {
15308 14615 mutex_exit(&mpt->m_mutex);
15309 14616 mptsas_log(mpt, CE_WARN,
15310 14617 "mptsas unable to get expander "
15311 14618 "parent device info for %x", page_address);
15312 14619 ndi_rtn = NDI_FAILURE;
15313 14620 goto smp_create_done;
15314 14621 }
15315 14622
15316 14623 smp_node->m_pdevhdl = dev_info.m_pdevhdl;
15317 14624 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15318 14625 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15319 14626 (uint32_t)dev_info.m_pdevhdl;
15320 14627 rval = mptsas_get_sas_device_page0(mpt, page_address,
15321 14628 &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo,
15322 14629 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
15323 14630 if (rval != DDI_SUCCESS) {
15324 14631 mutex_exit(&mpt->m_mutex);
15325 14632 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15326 14633 "device info for %x", page_address);
15327 14634 ndi_rtn = NDI_FAILURE;
15328 14635 goto smp_create_done;
15329 14636 }
15330 14637
15331 14638 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15332 14639 MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15333 14640 (uint32_t)dev_info.m_devhdl;
15334 14641 rval = mptsas_get_sas_device_page0(mpt, page_address,
15335 14642 &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
15336 14643 &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure);
15337 14644 if (rval != DDI_SUCCESS) {
15338 14645 mutex_exit(&mpt->m_mutex);
15339 14646 mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15340 14647 "device info for %x", page_address);
15341 14648 ndi_rtn = NDI_FAILURE;
15342 14649 goto smp_create_done;
15343 14650 }
15344 14651 mutex_exit(&mpt->m_mutex);
15345 14652
15346 14653 /*
15347 14654 * If this smp direct attached to the controller
15348 14655 * set the attached-port to the base wwid
15349 14656 */
15350 14657 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15351 14658 != DEVINFO_DIRECT_ATTACHED) {
15352 14659 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
15353 14660 sas_wwn);
15354 14661 } else {
15355 14662 (void) sprintf(attached_wwn_str, "w%016"PRIx64,
15356 14663 mpt->un.m_base_wwid);
15357 14664 }
15358 14665
15359 14666 if (ndi_prop_update_string(DDI_DEV_T_NONE,
15360 14667 *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
15361 14668 DDI_PROP_SUCCESS) {
15362 14669 mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15363 14670 "property for smp attached-port %s (sas_wwn)",
15364 14671 attached_wwn_str);
15365 14672 ndi_rtn = NDI_FAILURE;
15366 14673 goto smp_create_done;
15367 14674 }
15368 14675
15369 14676 if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15370 14677 *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
15371 14678 mptsas_log(mpt, CE_WARN, "mptsas unable to "
15372 14679 "create property for SMP %s (SMP_PROP) ",
15373 14680 wwn_str);
15374 14681 ndi_rtn = NDI_FAILURE;
15375 14682 goto smp_create_done;
15376 14683 }
15377 14684
15378 14685 /*
15379 14686 * check the smp to see whether it direct
15380 14687 * attached to the controller
15381 14688 */
15382 14689 if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15383 14690 != DEVINFO_DIRECT_ATTACHED) {
15384 14691 goto smp_create_done;
15385 14692 }
15386 14693 numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
15387 14694 DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
15388 14695 if (numphys > 0) {
15389 14696 goto smp_create_done;
15390 14697 }
15391 14698 /*
15392 14699 * this iport is an old iport, we need to
15393 14700 * reconfig the props for it.
15394 14701 */
15395 14702 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
15396 14703 MPTSAS_VIRTUAL_PORT, 0) !=
15397 14704 DDI_PROP_SUCCESS) {
15398 14705 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
15399 14706 MPTSAS_VIRTUAL_PORT);
15400 14707 mptsas_log(mpt, CE_WARN, "mptsas virtual port "
15401 14708 "prop update failed");
15402 14709 goto smp_create_done;
15403 14710 }
15404 14711
15405 14712 mutex_enter(&mpt->m_mutex);
15406 14713 numphys = 0;
15407 14714 iport = ddi_get_name_addr(pdip);
15408 14715 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
15409 14716 bzero(phymask, sizeof (phymask));
15410 14717 (void) sprintf(phymask,
15411 14718 "%x", mpt->m_phy_info[i].phy_mask);
15412 14719 if (strcmp(phymask, iport) == 0) {
15413 14720 phy_mask = mpt->m_phy_info[i].phy_mask;
15414 14721 break;
15415 14722 }
15416 14723 }
15417 14724
15418 14725 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
15419 14726 if ((phy_mask >> i) & 0x01) {
15420 14727 numphys++;
15421 14728 }
15422 14729 }
15423 14730 /*
15424 14731 * Update PHY info for smhba
15425 14732 */
15426 14733 if (mptsas_smhba_phy_init(mpt)) {
15427 14734 mutex_exit(&mpt->m_mutex);
15428 14735 mptsas_log(mpt, CE_WARN, "mptsas phy update "
15429 14736 "failed");
15430 14737 goto smp_create_done;
15431 14738 }
15432 14739 mutex_exit(&mpt->m_mutex);
15433 14740
15434 14741 mptsas_smhba_set_phy_props(mpt, iport, pdip,
15435 14742 numphys, &attached_devhdl);
15436 14743
15437 14744 if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
15438 14745 MPTSAS_NUM_PHYS, numphys) !=
15439 14746 DDI_PROP_SUCCESS) {
15440 14747 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
15441 14748 MPTSAS_NUM_PHYS);
15442 14749 mptsas_log(mpt, CE_WARN, "mptsas update "
15443 14750 "num phys props failed");
15444 14751 goto smp_create_done;
15445 14752 }
15446 14753 /*
15447 14754 * Add parent's props for SMHBA support
15448 14755 */
15449 14756 if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
15450 14757 SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15451 14758 DDI_PROP_SUCCESS) {
15452 14759 (void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
15453 14760 SCSI_ADDR_PROP_ATTACHED_PORT);
15454 14761 mptsas_log(mpt, CE_WARN, "mptsas update iport"
15455 14762 "attached-port failed");
15456 14763 goto smp_create_done;
15457 14764 }
15458 14765
15459 14766 smp_create_done:
15460 14767 /*
15461 14768 * If props were setup ok, online the lun
15462 14769 */
15463 14770 if (ndi_rtn == NDI_SUCCESS) {
15464 14771 /*
15465 14772 * Try to online the new node
15466 14773 */
15467 14774 ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
15468 14775 }
15469 14776
15470 14777 /*
15471 14778 * If success set rtn flag, else unwire alloc'd lun
15472 14779 */
15473 14780 if (ndi_rtn != NDI_SUCCESS) {
15474 14781 NDBG12(("mptsas unable to online "
15475 14782 "SMP target %s", wwn_str));
15476 14783 ndi_prop_remove_all(*smp_dip);
15477 14784 (void) ndi_devi_free(*smp_dip);
15478 14785 }
15479 14786 }
15480 14787
15481 14788 return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15482 14789 }
15483 14790
/* smp transport routine */
/*
 * SMP transport entry point: translate a SCSA smp_pkt into an MPI2 SMP
 * passthrough request, issue it synchronously via mptsas_do_passthru(),
 * and map IOC/SAS status back into smp_pkt_reason errno values.
 * Returns DDI_SUCCESS, or DDI_FAILURE with smp_pkt_reason set.
 */
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
{
	uint64_t			wwn;
	Mpi2SmpPassthroughRequest_t	req;
	Mpi2SmpPassthroughReply_t	rep;
	uint32_t			direction = 0;
	mptsas_t			*mpt;
	int				ret;
	uint64_t			tmp64;

	/* Recover the softstate stashed in the SMP tran private field. */
	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
	    smp_a_hba_tran->smp_tran_hba_private;

	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
	/*
	 * Need to compose a SMP request message
	 * and call mptsas_do_passthru() function
	 */
	bzero(&req, sizeof (req));
	bzero(&rep, sizeof (rep));
	req.PassthroughFlags = 0;
	req.PhysicalPort = 0xff;	/* 0xff = address by SASAddress */
	req.ChainOffset = 0;
	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* RequestDataLength is a 16-bit field; reject oversized requests. */
	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
		smp_pkt->smp_pkt_reason = ERANGE;
		return (DDI_FAILURE);
	}
	/*
	 * The -4 trims the trailing CRC dword from the frame length;
	 * assumes the caller always supplies reqsize >= 4 -- TODO confirm.
	 */
	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));

	req.MsgFlags = 0;
	/* SASAddress is little-endian on the wire. */
	tmp64 = LE_64(wwn);
	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
	if (smp_pkt->smp_pkt_rspsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
	}
	if (smp_pkt->smp_pkt_reqsize > 0) {
		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
	}

	/* mptsas_do_passthru() is synchronous and requires m_mutex held. */
	mutex_enter(&mpt->m_mutex);
	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
	    (uint8_t *)smp_pkt->smp_pkt_rsp,
	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
	    smp_pkt->smp_pkt_rspsize - 4, direction,
	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
	    smp_pkt->smp_pkt_timeout, FKIOCTL);
	mutex_exit(&mpt->m_mutex);
	if (ret != 0) {
		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
		/* NOTE(review): errno values > 255 would truncate here. */
		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
		return (DDI_FAILURE);
	}
	/* do passthrough success, check the smp status */
	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		/* Map IOC status to an errno for the SMP framework. */
		switch (LE_16(rep.IOCStatus)) {
		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
			smp_pkt->smp_pkt_reason = ENODEV;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
			smp_pkt->smp_pkt_reason = EOVERFLOW;
			break;
		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
			smp_pkt->smp_pkt_reason = EIO;
			break;
		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc"
			    "status:%x", LE_16(rep.IOCStatus));
			smp_pkt->smp_pkt_reason = EIO;
			break;
		}
		return (DDI_FAILURE);
	}
	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
		    rep.SASStatus);
		smp_pkt->smp_pkt_reason = EIO;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
15568 14875
15569 14876 /*
15570 14877 * If we didn't get a match, we need to get sas page0 for each device, and
15571 14878 * untill we get a match. If failed, return NULL
15572 14879 */
15573 14880 static mptsas_target_t *
15574 14881 mptsas_phy_to_tgt(mptsas_t *mpt, int phymask, uint8_t phy)
15575 14882 {
15576 14883 int i, j = 0;
15577 14884 int rval = 0;
15578 14885 uint16_t cur_handle;
15579 14886 uint32_t page_address;
15580 14887 mptsas_target_t *ptgt = NULL;
15581 14888
15582 14889 /*
15583 14890 * PHY named device must be direct attached and attaches to
15584 14891 * narrow port, if the iport is not parent of the device which
15585 14892 * we are looking for.
15586 14893 */
15587 14894 for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
15588 14895 if ((1 << i) & phymask)
15589 14896 j++;
15590 14897 }
15591 14898
15592 14899 if (j > 1)
15593 14900 return (NULL);
15594 14901
15595 14902 /*
15596 14903 * Must be a narrow port and single device attached to the narrow port
15597 14904 * So the physical port num of device which is equal to the iport's
15598 14905 * port num is the device what we are looking for.
15599 14906 */
15600 14907
15601 14908 if (mpt->m_phy_info[phy].phy_mask != phymask)
15602 14909 return (NULL);
15603 14910
15604 14911 mutex_enter(&mpt->m_mutex);
15605 14912
15606 14913 ptgt = (mptsas_target_t *)mptsas_hash_traverse(&mpt->m_active->m_tgttbl,
15607 14914 MPTSAS_HASH_FIRST);
15608 14915 while (ptgt != NULL) {
15609 14916 if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
15610 14917 mutex_exit(&mpt->m_mutex);
15611 14918 return (ptgt);
15612 14919 }
15613 14920
15614 14921 ptgt = (mptsas_target_t *)mptsas_hash_traverse(
15615 14922 &mpt->m_active->m_tgttbl, MPTSAS_HASH_NEXT);
15616 14923 }
15617 14924
15618 14925 if (mpt->m_done_traverse_dev) {
15619 14926 mutex_exit(&mpt->m_mutex);
15620 14927 return (NULL);
15621 14928 }
15622 14929
15623 14930 /* If didn't get a match, come here */
15624 14931 cur_handle = mpt->m_dev_handle;
15625 14932 for (; ; ) {
15626 14933 ptgt = NULL;
15627 14934 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
15628 14935 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
15629 14936 rval = mptsas_get_target_device_info(mpt, page_address,
15630 14937 &cur_handle, &ptgt);
15631 14938 if ((rval == DEV_INFO_FAIL_PAGE0) ||
15632 14939 (rval == DEV_INFO_FAIL_ALLOC)) {
15633 14940 break;
15634 14941 }
15635 14942 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
15636 14943 (rval == DEV_INFO_PHYS_DISK)) {
15637 14944 continue;
15638 14945 }
15639 14946 mpt->m_dev_handle = cur_handle;
15640 14947
15641 14948 if ((ptgt->m_sas_wwn == 0) && (ptgt->m_phynum == phy)) {
15642 14949 break;
15643 14950 }
15644 14951 }
15645 14952
15646 14953 mutex_exit(&mpt->m_mutex);
15647 14954 return (ptgt);
15648 14955 }
15649 14956
15650 14957 /*
15651 14958 * The ptgt->m_sas_wwn contains the wwid for each disk.
15652 14959 * For Raid volumes, we need to check m_raidvol[x].m_raidwwid
15653 14960 * If we didn't get a match, we need to get sas page0 for each device, and
15654 14961 * untill we get a match
15655 14962 * If failed, return NULL
15656 14963 */
15657 14964 static mptsas_target_t *
15658 14965 mptsas_wwid_to_ptgt(mptsas_t *mpt, int phymask, uint64_t wwid)
15659 14966 {
15660 14967 int rval = 0;
15661 14968 uint16_t cur_handle;
15662 14969 uint32_t page_address;
15663 14970 mptsas_target_t *tmp_tgt = NULL;
15664 14971
15665 14972 mutex_enter(&mpt->m_mutex);
15666 14973 tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
15667 14974 &mpt->m_active->m_tgttbl, wwid, phymask);
15668 14975 if (tmp_tgt != NULL) {
15669 14976 mutex_exit(&mpt->m_mutex);
15670 14977 return (tmp_tgt);
15671 14978 }
15672 14979
15673 14980 if (phymask == 0) {
15674 14981 /*
15675 14982 * It's IR volume
15676 14983 */
15677 14984 rval = mptsas_get_raid_info(mpt);
15678 14985 if (rval) {
15679 14986 tmp_tgt = (struct mptsas_target *)mptsas_hash_search(
15680 14987 &mpt->m_active->m_tgttbl, wwid, phymask);
15681 14988 }
15682 14989 mutex_exit(&mpt->m_mutex);
15683 14990 return (tmp_tgt);
15684 14991 }
15685 14992
15686 14993 if (mpt->m_done_traverse_dev) {
15687 14994 mutex_exit(&mpt->m_mutex);
15688 14995 return (NULL);
15689 14996 }
15690 14997
15691 14998 /* If didn't get a match, come here */
15692 14999 cur_handle = mpt->m_dev_handle;
15693 15000 for (; ; ) {
15694 15001 tmp_tgt = NULL;
15695 15002 page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
15696 15003 MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
15697 15004 rval = mptsas_get_target_device_info(mpt, page_address,
15698 15005 &cur_handle, &tmp_tgt);
15699 15006 if ((rval == DEV_INFO_FAIL_PAGE0) ||
15700 15007 (rval == DEV_INFO_FAIL_ALLOC)) {
15701 15008 tmp_tgt = NULL;
15702 15009 break;
15703 15010 }
15704 15011 if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
15705 15012 (rval == DEV_INFO_PHYS_DISK)) {
15706 15013 continue;
15707 15014 }
15708 15015 mpt->m_dev_handle = cur_handle;
15709 15016 if ((tmp_tgt->m_sas_wwn) && (tmp_tgt->m_sas_wwn == wwid) &&
15710 15017 (tmp_tgt->m_phymask == phymask)) {
15711 15018 break;
15712 15019 }
15713 15020 }
15714 15021
15715 15022 mutex_exit(&mpt->m_mutex);
15716 15023 return (tmp_tgt);
15717 15024 }
15718 15025
15719 15026 static mptsas_smp_t *
15720 15027 mptsas_wwid_to_psmp(mptsas_t *mpt, int phymask, uint64_t wwid)
15721 15028 {
15722 15029 int rval = 0;
15723 15030 uint16_t cur_handle;
15724 15031 uint32_t page_address;
15725 15032 mptsas_smp_t smp_node, *psmp = NULL;
15726 15033
15727 15034 mutex_enter(&mpt->m_mutex);
15728 15035 psmp = (struct mptsas_smp *)mptsas_hash_search(&mpt->m_active->m_smptbl,
15729 15036 wwid, phymask);
15730 15037 if (psmp != NULL) {
15731 15038 mutex_exit(&mpt->m_mutex);
15732 15039 return (psmp);
15733 15040 }
15734 15041
15735 15042 if (mpt->m_done_traverse_smp) {
15736 15043 mutex_exit(&mpt->m_mutex);
15737 15044 return (NULL);
15738 15045 }
15739 15046
15740 15047 /* If didn't get a match, come here */
15741 15048 cur_handle = mpt->m_smp_devhdl;
15742 15049 for (; ; ) {
15743 15050 psmp = NULL;
15744 15051 page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
15745 15052 MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
15746 15053 rval = mptsas_get_sas_expander_page0(mpt, page_address,
15747 15054 &smp_node);
15748 15055 if (rval != DDI_SUCCESS) {
15749 15056 break;
15750 15057 }
15751 15058 mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
15752 15059 psmp = mptsas_smp_alloc(&mpt->m_active->m_smptbl, &smp_node);
15753 15060 ASSERT(psmp);
15754 15061 if ((psmp->m_sasaddr) && (psmp->m_sasaddr == wwid) &&
15755 15062 (psmp->m_phymask == phymask)) {
15756 15063 break;
15757 15064 }
15758 15065 }
15759 15066
15760 15067 mutex_exit(&mpt->m_mutex);
15761 15068 return (psmp);
15762 15069 }
15763 15070
15764 15071 /* helper functions using hash */
15765 15072
15766 15073 /*
15767 15074 * Can't have duplicate entries for same devhdl,
15768 15075 * if there are invalid entries, the devhdl should be set to 0xffff
15769 15076 */
15770 15077 static void *
15771 15078 mptsas_search_by_devhdl(mptsas_hash_table_t *hashtab, uint16_t devhdl)
15772 15079 {
15773 15080 mptsas_hash_data_t *data;
15774 15081
15775 15082 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_FIRST);
15776 15083 while (data != NULL) {
↓ open down ↓ |
606 lines elided |
↑ open up ↑ |
15777 15084 if (data->devhdl == devhdl) {
15778 15085 break;
15779 15086 }
15780 15087 data = mptsas_hash_traverse(hashtab, MPTSAS_HASH_NEXT);
15781 15088 }
15782 15089 return (data);
15783 15090 }
15784 15091
15785 15092 mptsas_target_t *
15786 15093 mptsas_tgt_alloc(mptsas_hash_table_t *hashtab, uint16_t devhdl, uint64_t wwid,
15787 - uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum, mptsas_t *mpt)
15094 + uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
15788 15095 {
15789 15096 mptsas_target_t *tmp_tgt = NULL;
15790 15097
15791 15098 tmp_tgt = mptsas_hash_search(hashtab, wwid, phymask);
15792 15099 if (tmp_tgt != NULL) {
15793 15100 NDBG20(("Hash item already exist"));
15794 15101 tmp_tgt->m_deviceinfo = devinfo;
15795 15102 tmp_tgt->m_devhdl = devhdl;
15796 15103 return (tmp_tgt);
15797 15104 }
15798 15105 tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
15799 15106 if (tmp_tgt == NULL) {
15800 15107 cmn_err(CE_WARN, "Fatal, allocated tgt failed");
15801 15108 return (NULL);
15802 15109 }
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
15803 15110 tmp_tgt->m_devhdl = devhdl;
15804 15111 tmp_tgt->m_sas_wwn = wwid;
15805 15112 tmp_tgt->m_deviceinfo = devinfo;
15806 15113 tmp_tgt->m_phymask = phymask;
15807 15114 tmp_tgt->m_phynum = phynum;
15808 15115 /* Initialized the tgt structure */
15809 15116 tmp_tgt->m_qfull_retries = QFULL_RETRIES;
15810 15117 tmp_tgt->m_qfull_retry_interval =
15811 15118 drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
15812 15119 tmp_tgt->m_t_throttle = MAX_THROTTLE;
15813 - mutex_init(&tmp_tgt->m_tgt_intr_mutex, NULL, MUTEX_DRIVER,
15814 - DDI_INTR_PRI(mpt->m_intr_pri));
15815 15120
15816 15121 mptsas_hash_add(hashtab, tmp_tgt);
15817 15122
15818 15123 return (tmp_tgt);
15819 15124 }
15820 15125
15821 15126 static void
15822 15127 mptsas_tgt_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15823 15128 mptsas_phymask_t phymask)
15824 15129 {
15825 15130 mptsas_target_t *tmp_tgt;
15826 15131 tmp_tgt = mptsas_hash_rem(hashtab, wwid, phymask);
15827 15132 if (tmp_tgt == NULL) {
15828 15133 cmn_err(CE_WARN, "Tgt not found, nothing to free");
15829 15134 } else {
15830 - mutex_destroy(&tmp_tgt->m_tgt_intr_mutex);
15831 15135 kmem_free(tmp_tgt, sizeof (struct mptsas_target));
15832 15136 }
15833 15137 }
15834 15138
15835 15139 /*
15836 15140 * Return the entry in the hash table
15837 15141 */
15838 15142 static mptsas_smp_t *
15839 15143 mptsas_smp_alloc(mptsas_hash_table_t *hashtab, mptsas_smp_t *data)
15840 15144 {
15841 15145 uint64_t key1 = data->m_sasaddr;
15842 15146 mptsas_phymask_t key2 = data->m_phymask;
15843 15147 mptsas_smp_t *ret_data;
15844 15148
15845 15149 ret_data = mptsas_hash_search(hashtab, key1, key2);
15846 15150 if (ret_data != NULL) {
15847 15151 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15848 15152 return (ret_data);
15849 15153 }
15850 15154
15851 15155 ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
15852 15156 bcopy(data, ret_data, sizeof (mptsas_smp_t));
15853 15157 mptsas_hash_add(hashtab, ret_data);
15854 15158 return (ret_data);
15855 15159 }
15856 15160
15857 15161 static void
15858 15162 mptsas_smp_free(mptsas_hash_table_t *hashtab, uint64_t wwid,
15859 15163 mptsas_phymask_t phymask)
15860 15164 {
15861 15165 mptsas_smp_t *tmp_smp;
15862 15166 tmp_smp = mptsas_hash_rem(hashtab, wwid, phymask);
15863 15167 if (tmp_smp == NULL) {
15864 15168 cmn_err(CE_WARN, "Smp element not found, nothing to free");
15865 15169 } else {
15866 15170 kmem_free(tmp_smp, sizeof (struct mptsas_smp));
15867 15171 }
15868 15172 }
15869 15173
15870 15174 /*
15871 15175 * Hash operation functions
15872 15176 * key1 is the sas_wwn, key2 is the phymask
15873 15177 */
15874 15178 static void
15875 15179 mptsas_hash_init(mptsas_hash_table_t *hashtab)
15876 15180 {
15877 15181 if (hashtab == NULL) {
15878 15182 return;
15879 15183 }
15880 15184 bzero(hashtab->head, sizeof (mptsas_hash_node_t) *
15881 15185 MPTSAS_HASH_ARRAY_SIZE);
15882 15186 hashtab->cur = NULL;
15883 15187 hashtab->line = 0;
15884 15188 }
15885 15189
15886 15190 static void
15887 15191 mptsas_hash_uninit(mptsas_hash_table_t *hashtab, size_t datalen)
15888 15192 {
15889 15193 uint16_t line = 0;
15890 15194 mptsas_hash_node_t *cur = NULL, *last = NULL;
15891 15195
15892 15196 if (hashtab == NULL) {
15893 15197 return;
15894 15198 }
15895 15199 for (line = 0; line < MPTSAS_HASH_ARRAY_SIZE; line++) {
15896 15200 cur = hashtab->head[line];
15897 15201 while (cur != NULL) {
15898 15202 last = cur;
15899 15203 cur = cur->next;
15900 15204 kmem_free(last->data, datalen);
15901 15205 kmem_free(last, sizeof (mptsas_hash_node_t));
15902 15206 }
15903 15207 }
15904 15208 }
15905 15209
15906 15210 /*
15907 15211 * You must guarantee the element doesn't exist in the hash table
15908 15212 * before you call mptsas_hash_add()
15909 15213 */
15910 15214 static void
15911 15215 mptsas_hash_add(mptsas_hash_table_t *hashtab, void *data)
15912 15216 {
15913 15217 uint64_t key1 = ((mptsas_hash_data_t *)data)->key1;
15914 15218 mptsas_phymask_t key2 = ((mptsas_hash_data_t *)data)->key2;
15915 15219 mptsas_hash_node_t **head = NULL;
15916 15220 mptsas_hash_node_t *node = NULL;
15917 15221
15918 15222 if (hashtab == NULL) {
15919 15223 return;
15920 15224 }
15921 15225 ASSERT(mptsas_hash_search(hashtab, key1, key2) == NULL);
15922 15226 node = kmem_zalloc(sizeof (mptsas_hash_node_t), KM_NOSLEEP);
15923 15227 node->data = data;
15924 15228
15925 15229 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15926 15230 if (*head == NULL) {
15927 15231 *head = node;
15928 15232 } else {
15929 15233 node->next = *head;
15930 15234 *head = node;
15931 15235 }
15932 15236 }
15933 15237
15934 15238 static void *
15935 15239 mptsas_hash_rem(mptsas_hash_table_t *hashtab, uint64_t key1,
15936 15240 mptsas_phymask_t key2)
15937 15241 {
15938 15242 mptsas_hash_node_t **head = NULL;
15939 15243 mptsas_hash_node_t *last = NULL, *cur = NULL;
15940 15244 mptsas_hash_data_t *data;
15941 15245 if (hashtab == NULL) {
15942 15246 return (NULL);
15943 15247 }
15944 15248 head = &(hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE]);
15945 15249 cur = *head;
15946 15250 while (cur != NULL) {
15947 15251 data = cur->data;
15948 15252 if ((data->key1 == key1) && (data->key2 == key2)) {
15949 15253 if (last == NULL) {
15950 15254 (*head) = cur->next;
15951 15255 } else {
15952 15256 last->next = cur->next;
15953 15257 }
15954 15258 kmem_free(cur, sizeof (mptsas_hash_node_t));
15955 15259 return (data);
15956 15260 } else {
15957 15261 last = cur;
15958 15262 cur = cur->next;
15959 15263 }
15960 15264 }
15961 15265 return (NULL);
15962 15266 }
15963 15267
15964 15268 static void *
15965 15269 mptsas_hash_search(mptsas_hash_table_t *hashtab, uint64_t key1,
15966 15270 mptsas_phymask_t key2)
15967 15271 {
15968 15272 mptsas_hash_node_t *cur = NULL;
15969 15273 mptsas_hash_data_t *data;
15970 15274 if (hashtab == NULL) {
15971 15275 return (NULL);
15972 15276 }
15973 15277 cur = hashtab->head[key1 % MPTSAS_HASH_ARRAY_SIZE];
15974 15278 while (cur != NULL) {
15975 15279 data = cur->data;
15976 15280 if ((data->key1 == key1) && (data->key2 == key2)) {
15977 15281 return (data);
15978 15282 } else {
15979 15283 cur = cur->next;
15980 15284 }
15981 15285 }
15982 15286 return (NULL);
15983 15287 }
15984 15288
15985 15289 static void *
15986 15290 mptsas_hash_traverse(mptsas_hash_table_t *hashtab, int pos)
15987 15291 {
15988 15292 mptsas_hash_node_t *this = NULL;
15989 15293
15990 15294 if (hashtab == NULL) {
15991 15295 return (NULL);
15992 15296 }
15993 15297
15994 15298 if (pos == MPTSAS_HASH_FIRST) {
15995 15299 hashtab->line = 0;
15996 15300 hashtab->cur = NULL;
15997 15301 this = hashtab->head[0];
15998 15302 } else {
15999 15303 if (hashtab->cur == NULL) {
16000 15304 return (NULL);
16001 15305 } else {
16002 15306 this = hashtab->cur->next;
16003 15307 }
16004 15308 }
16005 15309
16006 15310 while (this == NULL) {
16007 15311 hashtab->line++;
16008 15312 if (hashtab->line >= MPTSAS_HASH_ARRAY_SIZE) {
16009 15313 /* the traverse reaches the end */
16010 15314 hashtab->cur = NULL;
16011 15315 return (NULL);
16012 15316 } else {
16013 15317 this = hashtab->head[hashtab->line];
16014 15318 }
16015 15319 }
16016 15320 hashtab->cur = this;
16017 15321 return (this->data);
16018 15322 }
16019 15323
16020 15324 /*
16021 15325 * Functions for SGPIO LED support
16022 15326 */
16023 15327 static dev_info_t *
16024 15328 mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
16025 15329 {
16026 15330 dev_info_t *dip;
↓ open down ↓ |
186 lines elided |
↑ open up ↑ |
16027 15331 int prop;
16028 15332 dip = e_ddi_hold_devi_by_dev(dev, 0);
16029 15333 if (dip == NULL)
16030 15334 return (dip);
16031 15335 prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
16032 15336 "phymask", 0);
16033 15337 *phymask = (mptsas_phymask_t)prop;
16034 15338 ddi_release_devi(dip);
16035 15339 return (dip);
16036 15340 }
16037 -static mptsas_target_t *
16038 -mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16039 -{
16040 - uint8_t phynum;
16041 - uint64_t wwn;
16042 - int lun;
16043 - mptsas_target_t *ptgt = NULL;
16044 15341
16045 - if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16046 - return (NULL);
16047 - }
16048 - if (addr[0] == 'w') {
16049 - ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16050 - } else {
16051 - ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16052 - }
16053 - return (ptgt);
16054 -}
16055 -
16056 -#ifdef MPTSAS_GET_LED
16057 -static int
16058 -mptsas_get_led_status(mptsas_t *mpt, mptsas_target_t *ptgt,
16059 - uint32_t *slotstatus)
16060 -{
16061 - return (mptsas_send_sep(mpt, ptgt, slotstatus,
16062 - MPI2_SEP_REQ_ACTION_READ_STATUS));
16063 -}
16064 -#endif
16065 -static int
16066 -mptsas_set_led_status(mptsas_t *mpt, mptsas_target_t *ptgt, uint32_t slotstatus)
16067 -{
16068 - NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16069 - slotstatus, ptgt->m_slot_num));
16070 - return (mptsas_send_sep(mpt, ptgt, &slotstatus,
16071 - MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16072 -}
16073 -/*
16074 - * send sep request, use enclosure/slot addressing
16075 - */
16076 -static int mptsas_send_sep(mptsas_t *mpt, mptsas_target_t *ptgt,
16077 - uint32_t *status, uint8_t act)
16078 -{
16079 - Mpi2SepRequest_t req;
16080 - Mpi2SepReply_t rep;
16081 - int ret;
16082 -
16083 - ASSERT(mutex_owned(&mpt->m_mutex));
16084 -
16085 - bzero(&req, sizeof (req));
16086 - bzero(&rep, sizeof (rep));
16087 -
16088 - /* Do nothing for RAID volumes */
16089 - if (ptgt->m_phymask == 0) {
16090 - NDBG14(("mptsas_send_sep: Skip RAID volumes"));
16091 - return (DDI_FAILURE);
16092 - }
16093 -
16094 - req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
16095 - req.Action = act;
16096 - req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
16097 - req.EnclosureHandle = LE_16(ptgt->m_enclosure);
16098 - req.Slot = LE_16(ptgt->m_slot_num);
16099 - if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16100 - req.SlotStatus = LE_32(*status);
16101 - }
16102 - ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
16103 - sizeof (req), sizeof (rep), NULL, 0, NULL, 0, 60, FKIOCTL);
16104 - if (ret != 0) {
16105 - mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
16106 - "Processor Request message error %d", ret);
16107 - return (DDI_FAILURE);
16108 - }
16109 - /* do passthrough success, check the ioc status */
16110 - if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
16111 - if ((LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) ==
16112 - MPI2_IOCSTATUS_INVALID_FIELD) {
16113 - mptsas_log(mpt, CE_NOTE, "send sep act %x: Not "
16114 - "supported action, loginfo %x", act,
16115 - LE_32(rep.IOCLogInfo));
16116 - return (DDI_FAILURE);
16117 - }
16118 - mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
16119 - "status:%x", act, LE_16(rep.IOCStatus));
16120 - return (DDI_FAILURE);
16121 - }
16122 - if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16123 - *status = LE_32(rep.SlotStatus);
16124 - }
16125 -
16126 - return (DDI_SUCCESS);
16127 -}
16128 -
16129 15342 int
16130 15343 mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
16131 15344 ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
16132 15345 uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
16133 15346 {
16134 15347 ddi_dma_cookie_t new_cookie;
16135 15348 size_t alloc_len;
16136 15349 uint_t ncookie;
16137 15350
16138 15351 if (cookiep == NULL)
16139 15352 cookiep = &new_cookie;
16140 15353
16141 15354 if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
16142 15355 NULL, dma_hdp) != DDI_SUCCESS) {
16143 15356 dma_hdp = NULL;
16144 15357 return (FALSE);
16145 15358 }
16146 15359
16147 15360 if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
16148 15361 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
16149 15362 acc_hdp) != DDI_SUCCESS) {
16150 15363 ddi_dma_free_handle(dma_hdp);
16151 15364 dma_hdp = NULL;
16152 15365 return (FALSE);
16153 15366 }
16154 15367
16155 15368 if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
16156 15369 (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
16157 15370 cookiep, &ncookie) != DDI_DMA_MAPPED) {
16158 15371 (void) ddi_dma_mem_free(acc_hdp);
16159 15372 ddi_dma_free_handle(dma_hdp);
16160 15373 dma_hdp = NULL;
16161 15374 return (FALSE);
16162 15375 }
16163 15376
16164 15377 return (TRUE);
16165 15378 }
16166 15379
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
16167 15380 void
16168 15381 mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
16169 15382 {
16170 15383 if (*dma_hdp == NULL)
16171 15384 return;
16172 15385
16173 15386 (void) ddi_dma_unbind_handle(*dma_hdp);
16174 15387 (void) ddi_dma_mem_free(acc_hdp);
16175 15388 ddi_dma_free_handle(dma_hdp);
16176 15389 dma_hdp = NULL;
16177 -}
16178 -
16179 -static int
16180 -mptsas_outstanding_cmds_n(mptsas_t *mpt)
16181 -{
16182 - int n = 0, i;
16183 - for (i = 0; i < mpt->m_slot_freeq_pair_n; i++) {
16184 - mutex_enter(&mpt->m_slot_freeq_pairp[i].
16185 - m_slot_allocq.s.m_fq_mutex);
16186 - mutex_enter(&mpt->m_slot_freeq_pairp[i].
16187 - m_slot_releq.s.m_fq_mutex);
16188 - n += (mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n_init -
16189 - mpt->m_slot_freeq_pairp[i].m_slot_allocq.s.m_fq_n -
16190 - mpt->m_slot_freeq_pairp[i].m_slot_releq.s.m_fq_n);
16191 - mutex_exit(&mpt->m_slot_freeq_pairp[i].
16192 - m_slot_releq.s.m_fq_mutex);
16193 - mutex_exit(&mpt->m_slot_freeq_pairp[i].
16194 - m_slot_allocq.s.m_fq_mutex);
16195 - }
16196 - if (mpt->m_max_requests - 2 < n)
16197 - panic("mptsas: free slot allocq and releq crazy");
16198 - return (n);
16199 15390 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX